// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <execinfo.h> /* backtrace */
#include <linux/membarrier.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}
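
/* A few illustrative matches for the glob syntax above (not exhaustive):
 *
 *	glob_match("bpf_cookie", "bpf_*")    -> true  (tail wildcard)
 *	glob_match("send_signal", "*signal") -> true  (leading wildcard)
 *	glob_match("xdp_attach", "tc_*")     -> false (prefix mismatch)
 *
 * Only '*' is supported; there is no '?' or character-class matching.
 */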

#define EXIT_NO_TEST		2
#define EXIT_ERR_SETUP_INFRA	3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
};

/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. Usleep is frequently used in selftests as a way to
 * trigger kprobe and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}

static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	int i, j;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(test_name, sel->blacklist.tests[i].name)) {
			if (!sel->blacklist.tests[i].subtest_cnt)
				return false;

			for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->blacklist.tests[i].subtests[j]))
					return false;
			}
		}
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(test_name, sel->whitelist.tests[i].name)) {
			if (!sel->whitelist.tests[i].subtest_cnt)
				return true;

			for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       sel->whitelist.tests[i].subtests[j]))
					return true;
			}
		}
	}

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool force_failed)
{
	bool failed = test_state->error_cnt > 0 || force_failed;

	/* worker always holds log */
	if (env.worker_id != -1)
		return;

	fflush(stdout); /* exports test_state->log_buf & test_state->log_cnt */

	fprintf(env.stdout, "#%-3d %s:%s\n",
		test->test_num, test->test_name,
		failed ? "FAIL" : (test_state->skip_cnt ? "SKIP" : "OK"));

	if (env.verbosity > VERBOSE_NONE || test_state->force_log || failed) {
		if (test_state->log_cnt) {
			test_state->log_buf[test_state->log_cnt] = '\0';
			fprintf(env.stdout, "%s", test_state->log_buf);
			if (test_state->log_buf[test_state->log_cnt - 1] != '\n')
				fprintf(env.stdout, "\n");
		}
	}
}

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	int sub_error_cnt = state->error_cnt - state->old_error_cnt;

	fprintf(stdout, "#%d/%d %s/%s:%s\n",
		test->test_num, state->subtest_num, test->test_name, state->subtest_name,
		sub_error_cnt ? "FAIL" : (state->subtest_skip_cnt ? "SKIP" : "OK"));

	if (sub_error_cnt == 0) {
		if (state->subtest_skip_cnt == 0) {
			state->sub_succ_cnt++;
		} else {
			state->subtest_skip_cnt = 0;
			state->skip_cnt++;
		}
	}

	free(state->subtest_name);
	state->subtest_name = NULL;
}

/* Start a named sub-test; returns false if the sub-test is filtered out.
 * Finalizes the previous sub-test (if any) via test__end_subtest().
 */
bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;

	if (state->subtest_name)
		test__end_subtest();

	state->subtest_num++;

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name))
		return false;

	state->subtest_name = strdup(subtest_name);
	if (!state->subtest_name) {
		fprintf(env.stderr,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}
	state->old_error_cnt = state->error_cnt;

	return true;
}

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.test_state->subtest_name)
		env.test_state->subtest_skip_cnt++;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	env.test_state->error_cnt++;
}

int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	if (!val_buf1 || !val_buf2) {
		err = -ENOMEM;
		goto out;
	}
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	pclose(fp);

	if (len > size - 1)
		len = size - 1;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	free(line);
	return 0;
err:
	pclose(fp);
	return -1;
}

static int finit_module(int fd, const char *param_values, int flags)
{
	return syscall(__NR_finit_module, fd, param_values, flags);
}

static int delete_module(const char *name, int flags)
{
	return syscall(__NR_delete_module, name, flags);
}

/*
 * Trigger synchronize_rcu() in kernel.
 */
int kern_sync_rcu(void)
{
	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}

static void unload_bpf_testmod(void)
{
	if (kern_sync_rcu())
		fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
	if (delete_module("bpf_testmod", 0)) {
		if (errno == ENOENT) {
			if (env.verbosity > VERBOSE_NONE)
				fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
			return;
		}
		fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
		return;
	}
	if (env.verbosity > VERBOSE_NONE)
		fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
}

static int load_bpf_testmod(void)
{
	int fd;

	/* ensure previous instance of the module is unloaded */
	unload_bpf_testmod();

	if (env.verbosity > VERBOSE_NONE)
		fprintf(stdout, "Loading bpf_testmod.ko...\n");

	fd = open("bpf_testmod.ko", O_RDONLY);
	if (fd < 0) {
		fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
		return -ENOENT;
	}
	if (finit_module(fd, "", 0)) {
		fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
		close(fd);
		return -EINVAL;
	}
	close(fd);

	if (env.verbosity > VERBOSE_NONE)
		fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
	return 0;
}

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name, \
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};
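
/* Each DEFINE_TEST(name) line in prog_tests/tests.h expands (via the macro
 * above) into one prog_test_def entry.  For example, a hypothetical
 * DEFINE_TEST(example) would produce:
 *
 *	{ .test_name = "example",
 *	  .run_test = &test_example,
 *	  .run_serial_test = &serial_test_example, },
 *
 * Exactly one of test_example()/serial_test_example() must be defined; the
 * other weak reference resolves to NULL, which main() checks below.
 */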

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] = "BPF selftests test runner";

enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
};

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only" },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests" },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them)" },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, defaults to number of CPUs." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "print extra debug information for test_progs." },
	{},
};
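
/* Illustrative invocations (the test names here are only examples):
 *
 *	./test_progs -t btf			# substring name match
 *	./test_progs -a 'xdp*' -d xdp_bonding	# glob allow/deny lists
 *	./test_progs -n 5/2			# test #5, subtest #2
 *	./test_progs -j				# parallelize across all CPUs
 */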

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;
	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (parse_test_list(arg,
				    &env->test_selector.whitelist,
				    key == ARG_TEST_NAME_GLOB_ALLOWLIST))
			return -ENOMEM;
		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (parse_test_list(arg,
				    &env->test_selector.blacklist,
				    key == ARG_TEST_NAME_GLOB_DENYLIST))
			return -ENOMEM;
		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
					arg);
				return -EINVAL;
			}
		}

		if (env->verbosity > VERBOSE_NONE) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of workers: %s.\n", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}
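
/* stdio_hijack()/stdio_restore() below redirect stdout/stderr into a
 * per-test memory buffer via open_memstream() so that each test's log can
 * be printed (or discarded) after the test finishes.  This only works on
 * glibc; on other libcs the output simply stays on the real stdout/stderr.
 */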

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	env.stdout = stdout;
	env.stderr = stderr;

	if (env.verbosity > VERBOSE_NONE && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	/* stdout and stderr -> buffer */
	fflush(stdout);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout;
		perror("open_memstream");
		return;
	}

	stderr = stdout;
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (stdout == env.stdout)
		return;

	fclose(stdout);

	stdout = env.stdout;
	stderr = env.stderr;
#endif
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (env.verbosity > VERBOSE_NONE)
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.test)
		dump_test_log(env.test, env.test_state, true);
	if (env.stdout)
		stdio_restore();
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;
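
/* Parallel-mode ("-j") protocol between the main (server) process and the
 * forked workers, over a SOCK_SEQPACKET socketpair (one fixed-size struct
 * msg per packet):
 *
 *	dispatcher -> worker:	MSG_DO_TEST { test_num }
 *	worker -> dispatcher:	MSG_TEST_DONE { test_num, counts, have_log }
 *	worker -> dispatcher:	MSG_TEST_LOG { log chunk, is_last } (optional, repeated)
 *	dispatcher -> worker:	MSG_EXIT (when no tests are left)
 */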

static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.test_num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.test_num,
			msg->test_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}

static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();

	/* ensure last sub-test is finalized properly */
	if (state->subtest_name)
		test__end_subtest();

	state->tested = true;

	dump_test_log(test, state, false);

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();
}

struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;
	FILE *log_fp = NULL;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;

		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.test_num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		{
			int err;
			struct msg msg_test_done;

			err = recv_message(sock_fd, &msg_test_done);
			if (err < 0)
				goto error;
			if (msg_test_done.type != MSG_TEST_DONE)
				goto error;
			if (test_to_run != msg_test_done.test_done.test_num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg_test_done.test_done.error_cnt;
			state->skip_cnt = msg_test_done.test_done.skip_cnt;
			state->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;

			/* collect all logs */
			if (msg_test_done.test_done.have_log) {
				log_fp = open_memstream(&state->log_buf, &state->log_cnt);
				if (!log_fp)
					goto error;

				while (true) {
					struct msg msg_log;

					if (recv_message(sock_fd, &msg_log) < 0)
						goto error;
					if (msg_log.type != MSG_TEST_LOG)
						goto error;

					fprintf(log_fp, "%s", msg_log.test_log.log_buf);
					if (msg_log.test_log.is_last)
						break;
				}
				fclose(log_fp);
				log_fp = NULL;
			}
		} /* wait for test done */
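
		/* serialize log output: multiple dispatcher threads finish
		 * tests concurrently, so dump each test's log under a lock
		 * to keep per-test output contiguous
		 */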
		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

	if (log_fp)
		fclose(log_fp);
done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	if (fail_cnt)
		printf("\nAll error logs:\n");

	/* print error logs again */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];
		struct test_state *state = &test_states[i];

		if (!state->tested || !state->error_cnt)
			continue;

		dump_test_log(test, state, true);
	}

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}

static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
	data = calloc(sizeof(struct dispatch_data), env.workers);

	env.worker_current_test = calloc(sizeof(int), env.workers);
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc < 0) {
			perror("Failed to launch dispatcher thread");
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatchers to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}
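
/* Worker process loop: runs in each forked child.  It receives MSG_DO_TEST
 * commands, executes the requested test locally via run_one_test(), then
 * reports the result with MSG_TEST_DONE and, if there is a captured log,
 * streams it back in MAX_LOG_TRUNK_SIZE-sized MSG_TEST_LOG chunks.
 */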
static int worker_main(int sock)
{
	save_netns();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.test_num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg_done;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg_done, 0, sizeof(msg_done));
			msg_done.type = MSG_TEST_DONE;
			msg_done.test_done.test_num = test_to_run;
			msg_done.test_done.error_cnt = state->error_cnt;
			msg_done.test_done.skip_cnt = state->skip_cnt;
			msg_done.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg_done.test_done.have_log = false;

			if (env.verbosity > VERBOSE_NONE || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg_done.test_done.have_log = true;
			}
			if (send_message(sock, &msg_done) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg_done.test_done.have_log) {
				char *src;
				size_t slen;

				src = state->log_buf;
				slen = state->log_cnt;
				while (slen) {
					struct msg msg_log;
					char *dest;
					size_t len;

					memset(&msg_log, 0, sizeof(msg_log));
					msg_log.type = MSG_TEST_LOG;
					dest = msg_log.test_log.log_buf;
					len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
					memcpy(dest, src, len);

					src += len;
					slen -= len;
					if (!slen)
						msg_log.test_log.is_last = true;

					assert(send_message(sock, &msg_log) >= 0);
				}
			}
			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}
			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}
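
/* Overall flow of main(): parse arguments, (re)load bpf_testmod.ko, mark
 * which tests should run, then either fork the requested number of worker
 * processes and dispatch parallel tests to them (serial tests still run in
 * the main process), or run everything sequentially in-process.  Finally,
 * print the summary and unload bpf_testmod.ko.
 */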
int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout = stdout;
	env.stderr = stderr;

	env.has_testmod = true;
	if (!env.list_test_names && load_bpf_testmod()) {
		fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
		env.has_testmod = false;
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		if (should_run(&env.test_selector,
			       test->test_num, test->test_name))
			test->should_run = true;
		else
			test->should_run = false;

		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(sizeof(__pid_t), env.workers);
		env.worker_socks = calloc(sizeof(int), env.workers);
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* on single mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod();

	free_test_selector(&env.test_selector);

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}