// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
#include <linux/membarrier.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include "json_writer.h"

static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}

static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	if (env.subtest_state)
		env.subtest_state->stdout = stdout;
	else
		env.test_state->stdout = stdout;

	stderr = stdout;
#endif
}

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}

static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);

	if (env.subtest_state) {
		fclose(env.subtest_state->stdout);
		env.subtest_state->stdout = NULL;
		stdout = env.test_state->stdout;
		stderr = env.test_state->stdout;
	} else {
		fclose(env.test_state->stdout);
		env.test_state->stdout = NULL;
	}
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	if (stdout == env.stdout)
		return;

	stdio_restore_cleanup();

	stdout = env.stdout;
	stderr = env.stderr;
#endif
}

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}
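
/* Illustrative behaviour of the matcher above (examples, not an exhaustive
 * spec): glob_match("tc_bpf", "tc_*") and glob_match("foo/bar", "*bar")
 * return true, while glob_match("tc_bpf", "tc") returns false because the
 * pattern has to consume the whole string.
 */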

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
};

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. Usleep is frequently used in selftests as a way to
 * trigger kprobe and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}

static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;

			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;

			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static char *test_result(bool failed, bool skipped)
{
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}

#define TEST_NUM_WIDTH 7

static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
{
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;

	fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt)
		fprintf(env.stdout, "FAIL");
	else if (!skipped_cnt)
		fprintf(env.stdout, "OK");
	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
		fprintf(env.stdout, "SKIP");
	else
		fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);

	fprintf(env.stdout, "\n");
}

static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout, "\n");
}

static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char test_num_str[TEST_NUM_WIDTH + 1];

	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);

	if (result)
		fprintf(env.stdout, ":%s", result);

	fprintf(env.stdout, "\n");
}

static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	/* open_memstream (from stdio_hijack_init) ensures that log_buf is
	 * terminated by a null byte. Yet in parallel mode, log_buf will be
	 * NULL if there is no message.
	 */
	if (log_cnt) {
		jsonw_string_field(w, "message", log_buf);
	} else {
		jsonw_string_field(w, "message", "");
	}
}

static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
			  json_writer_t *w)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;

	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;

	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;

	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);

	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	}

	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;

		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;

		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}

		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i + 1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
			jsonw_end_object(w);
		}
	}

	if (w && print_test) {
		jsonw_end_array(w);
		jsonw_end_object(w);
	}

	print_test_result(test, test_state);
}

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;

	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}

	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

	stdio_restore_cleanup();
	env.subtest_state = NULL;
}

bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}

	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);

	return true;
}

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}
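
/* Illustrative sketch of how a test is expected to use the subtest API
 * above; the names and conditions below are placeholders, not tests that
 * exist in this suite:
 *
 *	void test_foo(void)
 *	{
 *		if (test__start_subtest("bar")) {
 *			if (prerequisite_missing)
 *				test__skip();
 *			else if (something_went_wrong)
 *				test__fail();
 *		}
 *		... further test__start_subtest() calls ...
 *	}
 *
 * Each test__start_subtest() call finalizes the previous subtest via
 * test__end_subtest(); the last subtest is finalized by run_one_test()
 * after the test function returns.
 */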

int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

static int finit_module(int fd, const char *param_values, int flags)
{
	return syscall(__NR_finit_module, fd, param_values, flags);
}

static int delete_module(const char *name, int flags)
{
	return syscall(__NR_delete_module, name, flags);
}

/*
 * Trigger synchronize_rcu() in kernel.
 */
int kern_sync_rcu(void)
{
	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}

static void unload_bpf_testmod(void)
{
	if (kern_sync_rcu())
		fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
	if (delete_module("bpf_testmod", 0)) {
		if (errno == ENOENT) {
			if (verbose())
				fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
			return;
		}
		fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
		return;
	}
	if (verbose())
		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
}

static int load_bpf_testmod(void)
{
	int fd;

	/* ensure previous instance of the module is unloaded */
	unload_bpf_testmod();

	if (verbose())
		fprintf(stdout, "Loading bpf_testmod.ko...\n");

	fd = open("bpf_testmod.ko", O_RDONLY);
	if (fd < 0) {
		fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
		return -ENOENT;
	}
	if (finit_module(fd, "", 0)) {
		fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
		close(fd);
		return -EINVAL;
	}
	close(fd);

	if (verbose())
		fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
	return 0;
}

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name,	\
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"\n"
"These options can be passed repeatedly to read multiple files.\n";
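
/* A few illustrative invocations (the test names are placeholders; see the
 * option table below for the full set of flags):
 *
 *	./test_progs -t some_test,other_test	# substring match on names
 *	./test_progs -a 'map_*' -d 'map_kptr'	# glob allow/deny lists
 *	./test_progs -a @allowlist.txt		# read patterns from a file
 *	./test_progs -j				# parallel, one worker per CPU
 */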

enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
	ARG_JSON_SUMMARY = 'J'
};

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only" },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests" },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them)" },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, default to number of cpus." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "Print extra debug information for test_progs." },
	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0,
	  "Write report in json format to this file." },
	{},
};

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;
	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;
	int err = 0;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);

		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);

		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
					arg);
				return -EINVAL;
			}
		}

		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of workers: %s.\n", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open json summary file");
			return -errno;
		}
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return err;
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	close(fd);
	if (!ASSERT_EQ(err, len, "write sysctl"))
		return -1;

	return 0;
}

int get_bpf_max_tramp_links_from(struct btf *btf)
{
	const struct btf_enum *e;
	const struct btf_type *t;
	__u32 i, type_cnt;
	const char *name;
	__u16 j, vlen;

	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t) || t->name_off)
			continue;
		e = btf_enum(t);
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
				return e->val;
		}
	}

	return -1;
}

int get_bpf_max_tramp_links(void)
{
	struct btf *vmlinux_btf;
	int ret;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
		return -1;
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);

	return ret;
}

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.stdout)
		stdio_restore();
	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	}
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;

static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}

static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();

	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();

	state->tested = true;

	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();

	dump_test_log(test, state, false, false, NULL);
}
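
/* In parallel mode (-j) the main process forks env.workers worker processes
 * and talks to each over a SOCK_SEQPACKET socketpair. For every dispatched
 * test, dispatch_thread() and worker_main() below implement roughly this
 * exchange:
 *
 *	dispatcher -> worker:	MSG_DO_TEST (test number)
 *	worker -> dispatcher:	MSG_TEST_DONE (counts, have_log)
 *				MSG_TEST_LOG chunks if have_log, the last one
 *				with is_last set
 *				MSG_SUBTEST_DONE (plus optional MSG_TEST_LOG
 *				chunks) for each subtest
 *	dispatcher -> worker:	MSG_EXIT once no tests remain
 */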

struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type != type) {
		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
		return 1;
	}

	return 0;
}

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;

	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;

	while (true) {
		struct msg msg;

		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}

		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}

out:
	fclose(log_fp);
	log_fp = NULL;
	return result;
}

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}

static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;

		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		do {
			struct msg msg;

			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;

			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}

			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;

			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);

		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr, "Failed to create new JSON stream.");
	}

	if (w) {
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}

	/*
	 * We only print the error logs summary when there are failed tests
	 * and verbose mode is not enabled. Otherwise, results may be
	 * inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");

		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];

			if (!state->tested || !state->error_cnt)
				continue;

			dump_test_log(test, state, true, true, w);
		}
	}

	if (w) {
		jsonw_end_array(w);
		jsonw_end_object(w);
		jsonw_destroy(&w);
	}

	if (env->json)
		fclose(env->json);

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}

static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);

	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc) {
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatchers to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d\n", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}

static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;

	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;

		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);

		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;

		assert(send_message(sock, &msg_log) >= 0);
	}
}

static void free_subtest_state(struct subtest_state *state)
{
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;
		state->log_cnt = 0;
	}
	free(state->name);
	state->name = NULL;
}

static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		/* subtest_state->name is freed by free_subtest_state() */
		free_subtest_state(subtest_state);
	}

out:
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}

static int worker_main(int sock)
{
	save_netns();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;

			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);

			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}

			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}

static void free_test_states(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];

		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);

		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
	}
}

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	env.has_testmod = true;
	if (!env.list_test_names && load_bpf_testmod()) {
		fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
		env.has_testmod = false;
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);

		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* on single mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod();

	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	free_test_states();

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}