// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include "json_writer.h"

#ifdef __GLIBC__
#include <execinfo.h> /* backtrace */
#endif

/* Default backtrace funcs if missing at link */
__weak int backtrace(void **buffer, int size)
{
	return 0;
}

__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
{
	dprintf(fd, "<backtrace not supported>\n");
}

static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}

static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
	else
		env.test_state->stdout = stdout;

	stderr = stdout;
#endif
}

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}

static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);

	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
	} else {
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;
	}
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	if (stdout == env.stdout)
		return;

	stdio_restore_cleanup();

	stdout = env.stdout;
	stderr = env.stderr;
#endif
}

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
};

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. Usleep is frequently used in selftests as a way to
 * trigger kprobe and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}

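/* Decide whether a top-level test should run. A test is skipped if it matches
 * a deny-list entry that has no subtest patterns; otherwise it runs if it
 * matches an allow-list entry, if no allow list and no number set (-n) were
 * given, or if its number is in the selected number set.
 */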
static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;

			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;

			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static char *test_result(bool failed, bool skipped)
{
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}

#define TEST_NUM_WIDTH 7

static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
{
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;

	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt)
		fprintf(env.stdout, "FAIL");
	else if (!skipped_cnt)
		fprintf(env.stdout, "OK");
	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
		fprintf(env.stdout, "SKIP");
	else
		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);

	fprintf(env.stdout, "\n");
}

static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
}

static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char test_num_str[TEST_NUM_WIDTH + 1];

	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);

	if (result)
		fprintf(env.stdout, ":%s", result);

	fprintf(env.stdout, "\n");
}

static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	/* open_memstream (from stdio_hijack_init) ensures that log_buf is terminated by a
	 * null byte. Yet in parallel mode, log_buf will be NULL if there is no message.
	 */
	if (log_cnt) {
		jsonw_string_field(w, "message", log_buf);
	} else {
		jsonw_string_field(w, "message", "");
	}
}

static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
			  json_writer_t *w)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;

	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;

	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;

	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);

	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	}

	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;

		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;

		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}

		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i + 1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
			jsonw_end_object(w);
		}
	}

	if (w && print_test) {
		jsonw_end_array(w);
		jsonw_end_object(w);
	}

	print_test_result(test, test_state);
}

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;

	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}

	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

	stdio_restore_cleanup();
	env.subtest_state = NULL;
}

/* Called by tests to start a named sub-test. Finalizes any sub-test that is
 * still open, then returns true if the new sub-test is selected to run, or
 * false if it is filtered out (or on an internal error). While a sub-test
 * runs, its output is captured into the sub-test's own log buffer.
 */
bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}

	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);

	return true;
}

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}

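/* Create the cgroup at @path (relative to the cgroup root set up by
 * cgroup_helpers) and join it. The cgroup environment is initialized on first
 * use and is cleaned up after the test. Returns a file descriptor for the new
 * cgroup, or a negative value on error.
 */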
int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

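/* Table of all tests, generated from prog_tests/tests.h. Each test provides
 * either test_<name>() (may be dispatched to parallel workers) or
 * serial_test_<name>() (always run serially in the main process); main()
 * checks that exactly one of the two is defined.
 */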
static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name,	\
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"\n"
"These options can be passed repeatedly to read multiple files.\n";

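/* Example invocations (see the option descriptions below), e.g.:
 *   ./test_progs -t attach_probe,ringbuf     names containing these strings
 *   ./test_progs -a 'btf*' -d 'btf_dedup*'   glob allow/deny lists
 *   ./test_progs -n 10/1 -vv                 select by number, very verbose
 *   ./test_progs -j                          parallel mode, one worker per CPU
 */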
enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
	ARG_JSON_SUMMARY = 'J'
};

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only" },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests" },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them)" },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, default to number of cpus." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "Print extra debug information for test_progs." },
	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
	{},
};

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;
	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;
	int err = 0;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);

		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);

		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v, -vv and -vvv are supported\n",
					arg);
				return -EINVAL;
			}
		}

		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of workers: %s.\n", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open json summary file");
			return -errno;
		}
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return err;
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	close(fd);
	if (!ASSERT_EQ(err, len, "write sysctl"))
		return -1;

	return 0;
}

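/* Scan the given BTF for the anonymous enum that contains BPF_MAX_TRAMP_LINKS
 * and return that enumerator's value, or -1 if it cannot be found.
 */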
int get_bpf_max_tramp_links_from(struct btf *btf)
{
	const struct btf_enum *e;
	const struct btf_type *t;
	__u32 i, type_cnt;
	const char *name;
	__u16 j, vlen;

	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t) || t->name_off)
			continue;
		e = btf_enum(t);
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
				return e->val;
		}
	}

	return -1;
}

int get_bpf_max_tramp_links(void)
{
	struct btf *vmlinux_btf;
	int ret;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
		return -1;
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);

	return ret;
}

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.stdout)
		stdio_restore();
	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	}
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;

static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}

static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();

	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();

	state->tested = true;

	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();

	dump_test_log(test, state, false, false, NULL);
}

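/* Parallel mode: the main process runs one dispatcher thread per worker. A
 * dispatcher sends MSG_DO_TEST to its worker, waits for MSG_TEST_DONE, then
 * optionally reads MSG_TEST_LOG chunks and one MSG_SUBTEST_DONE (plus its log
 * chunks) per sub-test. When no tests are left it sends MSG_EXIT.
 */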
struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type != type) {
		printf("%s: unexpected message type %d, expected %d\n", __func__, msg->type, type);
		return 1;
	}

	return 0;
}

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;

	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;

	while (true) {
		struct msg msg;

		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}

		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}

out:
	fclose(log_fp);
	log_fp = NULL;
	return result;
}

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}

static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;

		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		do {
			struct msg msg;

			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;

			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}

			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;

			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);

		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr, "Failed to create new JSON stream.");
	}

	if (w) {
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}

	/*
	 * We only print error logs summary when there are failed tests and
	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");

		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];

			if (!state->tested || !state->error_cnt)
				continue;

			dump_test_log(test, state, true, true, w);
		}
	}

	if (w) {
		jsonw_end_array(w);
		jsonw_end_object(w);
		jsonw_destroy(&w);
	}

	if (env->json)
		fclose(env->json);

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}

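/* Main process in parallel mode: start one dispatcher thread per worker and
 * wait for all of them to finish, run the serial tests locally, print the
 * summary, then reap the worker processes.
 */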
static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);

	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc) {
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatchers to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d\n", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}

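/* Stream a captured log back to the dispatcher as a sequence of MSG_TEST_LOG
 * messages, MAX_LOG_TRUNK_SIZE bytes at a time; the final chunk is marked
 * with is_last.
 */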
static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;

	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;

		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);

		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;

		assert(send_message(sock, &msg_log) >= 0);
	}
}

static void free_subtest_state(struct subtest_state *state)
{
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;
		state->log_cnt = 0;
	}
	free(state->name);
	state->name = NULL;
}

static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		free_subtest_state(subtest_state);
		free(subtest_state->name);
	}

out:
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}

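/* Worker process loop: wait for commands from the dispatcher, run the test
 * requested by MSG_DO_TEST and report its result (plus any captured logs and
 * sub-test states) back, and exit on MSG_EXIT.
 */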
static int worker_main(int sock)
{
	save_netns();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;

			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);

			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}

			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}

static void free_test_states(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];

		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);

		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
	}
}

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	env.has_testmod = true;
	if (!env.list_test_names) {
		/* ensure previous instance of the module is unloaded */
		unload_bpf_testmod(verbose());

		if (load_bpf_testmod(verbose())) {
			fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
			env.has_testmod = false;
		}
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);

		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(sizeof(pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* single-process (non-parallel) mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod(verbose());

	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	free_test_states();

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}