// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"

/* referenced implicitly by the CHECK() macro from test_progs.h */
static int duration;

/* bpf_iter_test_kern3 is expected to be REJECTED by the kernel at load
 * time, so a successful open_and_load here is the failure case.  The
 * destroy in the error branch only runs when loading unexpectedly
 * succeeded and skel is therefore non-NULL.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

/* Attach @prog as an iterator, create an iterator fd from its link and
 * drain it to EOF.  The produced bytes are discarded; this only verifies
 * that the read loop terminates without error.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* not check contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n",
strerror(errno)); 61 62 close(iter_fd); 63 64 free_link: 65 bpf_link__destroy(link); 66 } 67 68 static int read_fd_into_buffer(int fd, char *buf, int size) 69 { 70 int bufleft = size; 71 int len; 72 73 do { 74 len = read(fd, buf, bufleft); 75 if (len > 0) { 76 buf += len; 77 bufleft -= len; 78 } 79 } while (len > 0); 80 81 return len < 0 ? len : size - bufleft; 82 } 83 84 static void test_ipv6_route(void) 85 { 86 struct bpf_iter_ipv6_route *skel; 87 88 skel = bpf_iter_ipv6_route__open_and_load(); 89 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", 90 "skeleton open_and_load failed\n")) 91 return; 92 93 do_dummy_read(skel->progs.dump_ipv6_route); 94 95 bpf_iter_ipv6_route__destroy(skel); 96 } 97 98 static void test_netlink(void) 99 { 100 struct bpf_iter_netlink *skel; 101 102 skel = bpf_iter_netlink__open_and_load(); 103 if (CHECK(!skel, "bpf_iter_netlink__open_and_load", 104 "skeleton open_and_load failed\n")) 105 return; 106 107 do_dummy_read(skel->progs.dump_netlink); 108 109 bpf_iter_netlink__destroy(skel); 110 } 111 112 static void test_bpf_map(void) 113 { 114 struct bpf_iter_bpf_map *skel; 115 116 skel = bpf_iter_bpf_map__open_and_load(); 117 if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", 118 "skeleton open_and_load failed\n")) 119 return; 120 121 do_dummy_read(skel->progs.dump_bpf_map); 122 123 bpf_iter_bpf_map__destroy(skel); 124 } 125 126 static void test_task(void) 127 { 128 struct bpf_iter_task *skel; 129 130 skel = bpf_iter_task__open_and_load(); 131 if (CHECK(!skel, "bpf_iter_task__open_and_load", 132 "skeleton open_and_load failed\n")) 133 return; 134 135 do_dummy_read(skel->progs.dump_task); 136 137 bpf_iter_task__destroy(skel); 138 } 139 140 static void test_task_stack(void) 141 { 142 struct bpf_iter_task_stack *skel; 143 144 skel = bpf_iter_task_stack__open_and_load(); 145 if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", 146 "skeleton open_and_load failed\n")) 147 return; 148 149 do_dummy_read(skel->progs.dump_task_stack); 150 151 
bpf_iter_task_stack__destroy(skel); 152 } 153 154 static void *do_nothing(void *arg) 155 { 156 pthread_exit(arg); 157 } 158 159 static void test_task_file(void) 160 { 161 struct bpf_iter_task_file *skel; 162 pthread_t thread_id; 163 void *ret; 164 165 skel = bpf_iter_task_file__open_and_load(); 166 if (CHECK(!skel, "bpf_iter_task_file__open_and_load", 167 "skeleton open_and_load failed\n")) 168 return; 169 170 skel->bss->tgid = getpid(); 171 172 if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL), 173 "pthread_create", "pthread_create failed\n")) 174 goto done; 175 176 do_dummy_read(skel->progs.dump_task_file); 177 178 if (CHECK(pthread_join(thread_id, &ret) || ret != NULL, 179 "pthread_join", "pthread_join failed\n")) 180 goto done; 181 182 CHECK(skel->bss->count != 0, "check_count", 183 "invalid non pthread file visit count %d\n", skel->bss->count); 184 185 done: 186 bpf_iter_task_file__destroy(skel); 187 } 188 189 #define TASKBUFSZ 32768 190 191 static char taskbuf[TASKBUFSZ]; 192 193 static int do_btf_read(struct bpf_iter_task_btf *skel) 194 { 195 struct bpf_program *prog = skel->progs.dump_task_struct; 196 struct bpf_iter_task_btf__bss *bss = skel->bss; 197 int iter_fd = -1, err; 198 struct bpf_link *link; 199 char *buf = taskbuf; 200 int ret = 0; 201 202 link = bpf_program__attach_iter(prog, NULL); 203 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 204 return ret; 205 206 iter_fd = bpf_iter_create(bpf_link__fd(link)); 207 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 208 goto free_link; 209 210 err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); 211 if (bss->skip) { 212 printf("%s:SKIP:no __builtin_btf_type_id\n", __func__); 213 ret = 1; 214 test__skip(); 215 goto free_link; 216 } 217 218 if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) 219 goto free_link; 220 221 CHECK(strstr(taskbuf, "(struct task_struct)") == NULL, 222 "check for btf representation of task_struct in iter data", 223 "struct 
task_struct not found"); 224 free_link: 225 if (iter_fd > 0) 226 close(iter_fd); 227 bpf_link__destroy(link); 228 return ret; 229 } 230 231 static void test_task_btf(void) 232 { 233 struct bpf_iter_task_btf__bss *bss; 234 struct bpf_iter_task_btf *skel; 235 int ret; 236 237 skel = bpf_iter_task_btf__open_and_load(); 238 if (CHECK(!skel, "bpf_iter_task_btf__open_and_load", 239 "skeleton open_and_load failed\n")) 240 return; 241 242 bss = skel->bss; 243 244 ret = do_btf_read(skel); 245 if (ret) 246 goto cleanup; 247 248 if (CHECK(bss->tasks == 0, "check if iterated over tasks", 249 "no task iteration, did BPF program run?\n")) 250 goto cleanup; 251 252 CHECK(bss->seq_err != 0, "check for unexpected err", 253 "bpf_seq_printf_btf returned %ld", bss->seq_err); 254 255 cleanup: 256 bpf_iter_task_btf__destroy(skel); 257 } 258 259 static void test_tcp4(void) 260 { 261 struct bpf_iter_tcp4 *skel; 262 263 skel = bpf_iter_tcp4__open_and_load(); 264 if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", 265 "skeleton open_and_load failed\n")) 266 return; 267 268 do_dummy_read(skel->progs.dump_tcp4); 269 270 bpf_iter_tcp4__destroy(skel); 271 } 272 273 static void test_tcp6(void) 274 { 275 struct bpf_iter_tcp6 *skel; 276 277 skel = bpf_iter_tcp6__open_and_load(); 278 if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", 279 "skeleton open_and_load failed\n")) 280 return; 281 282 do_dummy_read(skel->progs.dump_tcp6); 283 284 bpf_iter_tcp6__destroy(skel); 285 } 286 287 static void test_udp4(void) 288 { 289 struct bpf_iter_udp4 *skel; 290 291 skel = bpf_iter_udp4__open_and_load(); 292 if (CHECK(!skel, "bpf_iter_udp4__open_and_load", 293 "skeleton open_and_load failed\n")) 294 return; 295 296 do_dummy_read(skel->progs.dump_udp4); 297 298 bpf_iter_udp4__destroy(skel); 299 } 300 301 static void test_udp6(void) 302 { 303 struct bpf_iter_udp6 *skel; 304 305 skel = bpf_iter_udp6__open_and_load(); 306 if (CHECK(!skel, "bpf_iter_udp6__open_and_load", 307 "skeleton open_and_load failed\n")) 308 
return; 309 310 do_dummy_read(skel->progs.dump_udp6); 311 312 bpf_iter_udp6__destroy(skel); 313 } 314 315 /* The expected string is less than 16 bytes */ 316 static int do_read_with_fd(int iter_fd, const char *expected, 317 bool read_one_char) 318 { 319 int err = -1, len, read_buf_len, start; 320 char buf[16] = {}; 321 322 read_buf_len = read_one_char ? 1 : 16; 323 start = 0; 324 while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { 325 start += len; 326 if (CHECK(start >= 16, "read", "read len %d\n", len)) 327 return -1; 328 read_buf_len = read_one_char ? 1 : 16 - start; 329 } 330 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 331 return -1; 332 333 err = strcmp(buf, expected); 334 if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", 335 buf, expected)) 336 return -1; 337 338 return 0; 339 } 340 341 static void test_anon_iter(bool read_one_char) 342 { 343 struct bpf_iter_test_kern1 *skel; 344 struct bpf_link *link; 345 int iter_fd, err; 346 347 skel = bpf_iter_test_kern1__open_and_load(); 348 if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", 349 "skeleton open_and_load failed\n")) 350 return; 351 352 err = bpf_iter_test_kern1__attach(skel); 353 if (CHECK(err, "bpf_iter_test_kern1__attach", 354 "skeleton attach failed\n")) { 355 goto out; 356 } 357 358 link = skel->links.dump_task; 359 iter_fd = bpf_iter_create(bpf_link__fd(link)); 360 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 361 goto out; 362 363 do_read_with_fd(iter_fd, "abcd", read_one_char); 364 close(iter_fd); 365 366 out: 367 bpf_iter_test_kern1__destroy(skel); 368 } 369 370 static int do_read(const char *path, const char *expected) 371 { 372 int err, iter_fd; 373 374 iter_fd = open(path, O_RDONLY); 375 if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", 376 path, strerror(errno))) 377 return -1; 378 379 err = do_read_with_fd(iter_fd, expected, false); 380 close(iter_fd); 381 return err; 382 } 383 384 static void test_file_iter(void) 
385 { 386 const char *path = "/sys/fs/bpf/bpf_iter_test1"; 387 struct bpf_iter_test_kern1 *skel1; 388 struct bpf_iter_test_kern2 *skel2; 389 struct bpf_link *link; 390 int err; 391 392 skel1 = bpf_iter_test_kern1__open_and_load(); 393 if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", 394 "skeleton open_and_load failed\n")) 395 return; 396 397 link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); 398 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 399 goto out; 400 401 /* unlink this path if it exists. */ 402 unlink(path); 403 404 err = bpf_link__pin(link, path); 405 if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) 406 goto free_link; 407 408 err = do_read(path, "abcd"); 409 if (err) 410 goto unlink_path; 411 412 /* file based iterator seems working fine. Let us a link update 413 * of the underlying link and `cat` the iterator again, its content 414 * should change. 415 */ 416 skel2 = bpf_iter_test_kern2__open_and_load(); 417 if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", 418 "skeleton open_and_load failed\n")) 419 goto unlink_path; 420 421 err = bpf_link__update_program(link, skel2->progs.dump_task); 422 if (CHECK(err, "update_prog", "update_prog failed\n")) 423 goto destroy_skel2; 424 425 do_read(path, "ABCD"); 426 427 destroy_skel2: 428 bpf_iter_test_kern2__destroy(skel2); 429 unlink_path: 430 unlink(path); 431 free_link: 432 bpf_link__destroy(link); 433 out: 434 bpf_iter_test_kern1__destroy(skel1); 435 } 436 437 static void test_overflow(bool test_e2big_overflow, bool ret1) 438 { 439 __u32 map_info_len, total_read_len, expected_read_len; 440 int err, iter_fd, map1_fd, map2_fd, len; 441 struct bpf_map_info map_info = {}; 442 struct bpf_iter_test_kern4 *skel; 443 struct bpf_link *link; 444 __u32 iter_size; 445 char *buf; 446 447 skel = bpf_iter_test_kern4__open(); 448 if (CHECK(!skel, "bpf_iter_test_kern4__open", 449 "skeleton open failed\n")) 450 return; 451 452 /* create two maps: bpf program will only 
do bpf_seq_write 453 * for these two maps. The goal is one map output almost 454 * fills seq_file buffer and then the other will trigger 455 * overflow and needs restart. 456 */ 457 map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 458 if (CHECK(map1_fd < 0, "bpf_create_map", 459 "map_creation failed: %s\n", strerror(errno))) 460 goto out; 461 map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 462 if (CHECK(map2_fd < 0, "bpf_create_map", 463 "map_creation failed: %s\n", strerror(errno))) 464 goto free_map1; 465 466 /* bpf_seq_printf kernel buffer is 8 pages, so one map 467 * bpf_seq_write will mostly fill it, and the other map 468 * will partially fill and then trigger overflow and need 469 * bpf_seq_read restart. 470 */ 471 iter_size = sysconf(_SC_PAGE_SIZE) << 3; 472 473 if (test_e2big_overflow) { 474 skel->rodata->print_len = (iter_size + 8) / 8; 475 expected_read_len = 2 * (iter_size + 8); 476 } else if (!ret1) { 477 skel->rodata->print_len = (iter_size - 8) / 8; 478 expected_read_len = 2 * (iter_size - 8); 479 } else { 480 skel->rodata->print_len = 1; 481 expected_read_len = 2 * 8; 482 } 483 skel->rodata->ret1 = ret1; 484 485 if (CHECK(bpf_iter_test_kern4__load(skel), 486 "bpf_iter_test_kern4__load", "skeleton load failed\n")) 487 goto free_map2; 488 489 /* setup filtering map_id in bpf program */ 490 map_info_len = sizeof(map_info); 491 err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); 492 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 493 strerror(errno))) 494 goto free_map2; 495 skel->bss->map1_id = map_info.id; 496 497 err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); 498 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 499 strerror(errno))) 500 goto free_map2; 501 skel->bss->map2_id = map_info.id; 502 503 link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); 504 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 505 goto free_map2; 506 507 iter_fd = 
bpf_iter_create(bpf_link__fd(link)); 508 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 509 goto free_link; 510 511 buf = malloc(expected_read_len); 512 if (!buf) 513 goto close_iter; 514 515 /* do read */ 516 total_read_len = 0; 517 if (test_e2big_overflow) { 518 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 519 total_read_len += len; 520 521 CHECK(len != -1 || errno != E2BIG, "read", 522 "expected ret -1, errno E2BIG, but get ret %d, error %s\n", 523 len, strerror(errno)); 524 goto free_buf; 525 } else if (!ret1) { 526 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 527 total_read_len += len; 528 529 if (CHECK(len < 0, "read", "read failed: %s\n", 530 strerror(errno))) 531 goto free_buf; 532 } else { 533 do { 534 len = read(iter_fd, buf, expected_read_len); 535 if (len > 0) 536 total_read_len += len; 537 } while (len > 0 || len == -EAGAIN); 538 539 if (CHECK(len < 0, "read", "read failed: %s\n", 540 strerror(errno))) 541 goto free_buf; 542 } 543 544 if (CHECK(total_read_len != expected_read_len, "read", 545 "total len %u, expected len %u\n", total_read_len, 546 expected_read_len)) 547 goto free_buf; 548 549 if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", 550 "expected 1 actual %d\n", skel->bss->map1_accessed)) 551 goto free_buf; 552 553 if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", 554 "expected 2 actual %d\n", skel->bss->map2_accessed)) 555 goto free_buf; 556 557 CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, 558 "map2_seqnum", "two different seqnum %lld %lld\n", 559 skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); 560 561 free_buf: 562 free(buf); 563 close_iter: 564 close(iter_fd); 565 free_link: 566 bpf_link__destroy(link); 567 free_map2: 568 close(map2_fd); 569 free_map1: 570 close(map1_fd); 571 out: 572 bpf_iter_test_kern4__destroy(skel); 573 } 574 575 static void test_bpf_hash_map(void) 576 { 577 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 578 
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 579 struct bpf_iter_bpf_hash_map *skel; 580 int err, i, len, map_fd, iter_fd; 581 union bpf_iter_link_info linfo; 582 __u64 val, expected_val = 0; 583 struct bpf_link *link; 584 struct key_t { 585 int a; 586 int b; 587 int c; 588 } key; 589 char buf[64]; 590 591 skel = bpf_iter_bpf_hash_map__open(); 592 if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", 593 "skeleton open failed\n")) 594 return; 595 596 skel->bss->in_test_mode = true; 597 598 err = bpf_iter_bpf_hash_map__load(skel); 599 if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", 600 "skeleton load failed\n")) 601 goto out; 602 603 /* iterator with hashmap2 and hashmap3 should fail */ 604 memset(&linfo, 0, sizeof(linfo)); 605 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2); 606 opts.link_info = &linfo; 607 opts.link_info_len = sizeof(linfo); 608 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 609 if (CHECK(!IS_ERR(link), "attach_iter", 610 "attach_iter for hashmap2 unexpected succeeded\n")) 611 goto out; 612 613 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3); 614 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 615 if (CHECK(!IS_ERR(link), "attach_iter", 616 "attach_iter for hashmap3 unexpected succeeded\n")) 617 goto out; 618 619 /* hashmap1 should be good, update map values here */ 620 map_fd = bpf_map__fd(skel->maps.hashmap1); 621 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 622 key.a = i + 1; 623 key.b = i + 2; 624 key.c = i + 3; 625 val = i + 4; 626 expected_key_a += key.a; 627 expected_key_b += key.b; 628 expected_key_c += key.c; 629 expected_val += val; 630 631 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); 632 if (CHECK(err, "map_update", "map_update failed\n")) 633 goto out; 634 } 635 636 linfo.map.map_fd = map_fd; 637 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 638 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 639 goto out; 640 
641 iter_fd = bpf_iter_create(bpf_link__fd(link)); 642 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 643 goto free_link; 644 645 /* do some tests */ 646 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 647 ; 648 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 649 goto close_iter; 650 651 /* test results */ 652 if (CHECK(skel->bss->key_sum_a != expected_key_a, 653 "key_sum_a", "got %u expected %u\n", 654 skel->bss->key_sum_a, expected_key_a)) 655 goto close_iter; 656 if (CHECK(skel->bss->key_sum_b != expected_key_b, 657 "key_sum_b", "got %u expected %u\n", 658 skel->bss->key_sum_b, expected_key_b)) 659 goto close_iter; 660 if (CHECK(skel->bss->val_sum != expected_val, 661 "val_sum", "got %llu expected %llu\n", 662 skel->bss->val_sum, expected_val)) 663 goto close_iter; 664 665 close_iter: 666 close(iter_fd); 667 free_link: 668 bpf_link__destroy(link); 669 out: 670 bpf_iter_bpf_hash_map__destroy(skel); 671 } 672 673 static void test_bpf_percpu_hash_map(void) 674 { 675 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 676 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 677 struct bpf_iter_bpf_percpu_hash_map *skel; 678 int err, i, j, len, map_fd, iter_fd; 679 union bpf_iter_link_info linfo; 680 __u32 expected_val = 0; 681 struct bpf_link *link; 682 struct key_t { 683 int a; 684 int b; 685 int c; 686 } key; 687 char buf[64]; 688 void *val; 689 690 val = malloc(8 * bpf_num_possible_cpus()); 691 692 skel = bpf_iter_bpf_percpu_hash_map__open(); 693 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", 694 "skeleton open failed\n")) 695 return; 696 697 skel->rodata->num_cpus = bpf_num_possible_cpus(); 698 699 err = bpf_iter_bpf_percpu_hash_map__load(skel); 700 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", 701 "skeleton load failed\n")) 702 goto out; 703 704 /* update map values here */ 705 map_fd = bpf_map__fd(skel->maps.hashmap1); 706 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 707 
key.a = i + 1; 708 key.b = i + 2; 709 key.c = i + 3; 710 expected_key_a += key.a; 711 expected_key_b += key.b; 712 expected_key_c += key.c; 713 714 for (j = 0; j < bpf_num_possible_cpus(); j++) { 715 *(__u32 *)(val + j * 8) = i + j; 716 expected_val += i + j; 717 } 718 719 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); 720 if (CHECK(err, "map_update", "map_update failed\n")) 721 goto out; 722 } 723 724 memset(&linfo, 0, sizeof(linfo)); 725 linfo.map.map_fd = map_fd; 726 opts.link_info = &linfo; 727 opts.link_info_len = sizeof(linfo); 728 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); 729 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 730 goto out; 731 732 iter_fd = bpf_iter_create(bpf_link__fd(link)); 733 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 734 goto free_link; 735 736 /* do some tests */ 737 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 738 ; 739 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 740 goto close_iter; 741 742 /* test results */ 743 if (CHECK(skel->bss->key_sum_a != expected_key_a, 744 "key_sum_a", "got %u expected %u\n", 745 skel->bss->key_sum_a, expected_key_a)) 746 goto close_iter; 747 if (CHECK(skel->bss->key_sum_b != expected_key_b, 748 "key_sum_b", "got %u expected %u\n", 749 skel->bss->key_sum_b, expected_key_b)) 750 goto close_iter; 751 if (CHECK(skel->bss->val_sum != expected_val, 752 "val_sum", "got %u expected %u\n", 753 skel->bss->val_sum, expected_val)) 754 goto close_iter; 755 756 close_iter: 757 close(iter_fd); 758 free_link: 759 bpf_link__destroy(link); 760 out: 761 bpf_iter_bpf_percpu_hash_map__destroy(skel); 762 } 763 764 static void test_bpf_array_map(void) 765 { 766 __u64 val, expected_val = 0, res_first_val, first_val = 0; 767 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 768 __u32 expected_key = 0, res_first_key; 769 struct bpf_iter_bpf_array_map *skel; 770 union bpf_iter_link_info linfo; 771 int err, i, map_fd, iter_fd; 
772 struct bpf_link *link; 773 char buf[64] = {}; 774 int len, start; 775 776 skel = bpf_iter_bpf_array_map__open_and_load(); 777 if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", 778 "skeleton open_and_load failed\n")) 779 return; 780 781 map_fd = bpf_map__fd(skel->maps.arraymap1); 782 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 783 val = i + 4; 784 expected_key += i; 785 expected_val += val; 786 787 if (i == 0) 788 first_val = val; 789 790 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); 791 if (CHECK(err, "map_update", "map_update failed\n")) 792 goto out; 793 } 794 795 memset(&linfo, 0, sizeof(linfo)); 796 linfo.map.map_fd = map_fd; 797 opts.link_info = &linfo; 798 opts.link_info_len = sizeof(linfo); 799 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); 800 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 801 goto out; 802 803 iter_fd = bpf_iter_create(bpf_link__fd(link)); 804 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 805 goto free_link; 806 807 /* do some tests */ 808 start = 0; 809 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) 810 start += len; 811 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 812 goto close_iter; 813 814 /* test results */ 815 res_first_key = *(__u32 *)buf; 816 res_first_val = *(__u64 *)(buf + sizeof(__u32)); 817 if (CHECK(res_first_key != 0 || res_first_val != first_val, 818 "bpf_seq_write", 819 "seq_write failure: first key %u vs expected 0, " 820 " first value %llu vs expected %llu\n", 821 res_first_key, res_first_val, first_val)) 822 goto close_iter; 823 824 if (CHECK(skel->bss->key_sum != expected_key, 825 "key_sum", "got %u expected %u\n", 826 skel->bss->key_sum, expected_key)) 827 goto close_iter; 828 if (CHECK(skel->bss->val_sum != expected_val, 829 "val_sum", "got %llu expected %llu\n", 830 skel->bss->val_sum, expected_val)) 831 goto close_iter; 832 833 for (i = 0; i < 
bpf_map__max_entries(skel->maps.arraymap1); i++) { 834 err = bpf_map_lookup_elem(map_fd, &i, &val); 835 if (CHECK(err, "map_lookup", "map_lookup failed\n")) 836 goto out; 837 if (CHECK(i != val, "invalid_val", 838 "got value %llu expected %u\n", val, i)) 839 goto out; 840 } 841 842 close_iter: 843 close(iter_fd); 844 free_link: 845 bpf_link__destroy(link); 846 out: 847 bpf_iter_bpf_array_map__destroy(skel); 848 } 849 850 static void test_bpf_percpu_array_map(void) 851 { 852 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 853 struct bpf_iter_bpf_percpu_array_map *skel; 854 __u32 expected_key = 0, expected_val = 0; 855 union bpf_iter_link_info linfo; 856 int err, i, j, map_fd, iter_fd; 857 struct bpf_link *link; 858 char buf[64]; 859 void *val; 860 int len; 861 862 val = malloc(8 * bpf_num_possible_cpus()); 863 864 skel = bpf_iter_bpf_percpu_array_map__open(); 865 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", 866 "skeleton open failed\n")) 867 return; 868 869 skel->rodata->num_cpus = bpf_num_possible_cpus(); 870 871 err = bpf_iter_bpf_percpu_array_map__load(skel); 872 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", 873 "skeleton load failed\n")) 874 goto out; 875 876 /* update map values here */ 877 map_fd = bpf_map__fd(skel->maps.arraymap1); 878 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 879 expected_key += i; 880 881 for (j = 0; j < bpf_num_possible_cpus(); j++) { 882 *(__u32 *)(val + j * 8) = i + j; 883 expected_val += i + j; 884 } 885 886 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); 887 if (CHECK(err, "map_update", "map_update failed\n")) 888 goto out; 889 } 890 891 memset(&linfo, 0, sizeof(linfo)); 892 linfo.map.map_fd = map_fd; 893 opts.link_info = &linfo; 894 opts.link_info_len = sizeof(linfo); 895 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); 896 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 897 goto out; 898 899 iter_fd = bpf_iter_create(bpf_link__fd(link)); 
900 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 901 goto free_link; 902 903 /* do some tests */ 904 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 905 ; 906 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 907 goto close_iter; 908 909 /* test results */ 910 if (CHECK(skel->bss->key_sum != expected_key, 911 "key_sum", "got %u expected %u\n", 912 skel->bss->key_sum, expected_key)) 913 goto close_iter; 914 if (CHECK(skel->bss->val_sum != expected_val, 915 "val_sum", "got %u expected %u\n", 916 skel->bss->val_sum, expected_val)) 917 goto close_iter; 918 919 close_iter: 920 close(iter_fd); 921 free_link: 922 bpf_link__destroy(link); 923 out: 924 bpf_iter_bpf_percpu_array_map__destroy(skel); 925 } 926 927 /* An iterator program deletes all local storage in a map. */ 928 static void test_bpf_sk_storage_delete(void) 929 { 930 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 931 struct bpf_iter_bpf_sk_storage_helpers *skel; 932 union bpf_iter_link_info linfo; 933 int err, len, map_fd, iter_fd; 934 struct bpf_link *link; 935 int sock_fd = -1; 936 __u32 val = 42; 937 char buf[64]; 938 939 skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); 940 if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", 941 "skeleton open_and_load failed\n")) 942 return; 943 944 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 945 946 sock_fd = socket(AF_INET6, SOCK_STREAM, 0); 947 if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) 948 goto out; 949 err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); 950 if (CHECK(err, "map_update", "map_update failed\n")) 951 goto out; 952 953 memset(&linfo, 0, sizeof(linfo)); 954 linfo.map.map_fd = map_fd; 955 opts.link_info = &linfo; 956 opts.link_info_len = sizeof(linfo); 957 link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map, 958 &opts); 959 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 960 goto out; 961 962 iter_fd = bpf_iter_create(bpf_link__fd(link)); 
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results: the entry seeded above must be gone, i.e. lookup
	 * fails with ENOENT
	 */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of the
 * only task owning a file descriptor to this socket, this process, prog_tests.
 * It then runs a tcp socket iterator that negates the value in the existing
 * socket local storage, the test verifies that the resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;

	/* listen() so the socket shows up in the tcp6 iterator */
	err = listen(sock_fd, 1);
	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	/* seed the storage with a sentinel the BPF programs will rewrite */
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
		goto close_socket;

	/* task iterator: stores the owning tgid into the socket storage */
	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	/* tcp socket iterator: negates the stored value */
	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* Create several sockets with distinct storage values and verify the
 * sk_storage_map iterator visits all of them (count and value sum are
 * maintained by the BPF program).
 */
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

/* Attaching dump_bpf_hash_map to test_kern5's map must be rejected;
 * presumably the program declares a read-only buffer too small for this
 * map's keys/values — confirm against bpf_iter_test_kern5.c.
 */
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
union bpf_iter_link_info linfo; 1118 struct bpf_link *link; 1119 1120 skel = bpf_iter_test_kern5__open_and_load(); 1121 if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load", 1122 "skeleton open_and_load failed\n")) 1123 return; 1124 1125 memset(&linfo, 0, sizeof(linfo)); 1126 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1); 1127 opts.link_info = &linfo; 1128 opts.link_info_len = sizeof(linfo); 1129 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 1130 if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n")) 1131 bpf_link__destroy(link); 1132 1133 bpf_iter_test_kern5__destroy(skel); 1134 } 1135 1136 static void test_buf_neg_offset(void) 1137 { 1138 struct bpf_iter_test_kern6 *skel; 1139 1140 skel = bpf_iter_test_kern6__open_and_load(); 1141 if (CHECK(skel, "bpf_iter_test_kern6__open_and_load", 1142 "skeleton open_and_load unexpected success\n")) 1143 bpf_iter_test_kern6__destroy(skel); 1144 } 1145 1146 #define CMP_BUFFER_SIZE 1024 1147 static char task_vma_output[CMP_BUFFER_SIZE]; 1148 static char proc_maps_output[CMP_BUFFER_SIZE]; 1149 1150 /* remove \0 and \t from str, and only keep the first line */ 1151 static void str_strip_first_line(char *str) 1152 { 1153 char *dst = str, *src = str; 1154 1155 do { 1156 if (*src == ' ' || *src == '\t') 1157 src++; 1158 else 1159 *(dst++) = *(src++); 1160 1161 } while (*src != '\0' && *src != '\n'); 1162 1163 *dst = '\0'; 1164 } 1165 1166 #define min(a, b) ((a) < (b) ? 
(a) : (b)) 1167 1168 static void test_task_vma(void) 1169 { 1170 int err, iter_fd = -1, proc_maps_fd = -1; 1171 struct bpf_iter_task_vma *skel; 1172 int len, read_size = 4; 1173 char maps_path[64]; 1174 1175 skel = bpf_iter_task_vma__open(); 1176 if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n")) 1177 return; 1178 1179 skel->bss->pid = getpid(); 1180 1181 err = bpf_iter_task_vma__load(skel); 1182 if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n")) 1183 goto out; 1184 1185 skel->links.proc_maps = bpf_program__attach_iter( 1186 skel->progs.proc_maps, NULL); 1187 1188 if (CHECK(IS_ERR(skel->links.proc_maps), "bpf_program__attach_iter", 1189 "attach iterator failed\n")) { 1190 skel->links.proc_maps = NULL; 1191 goto out; 1192 } 1193 1194 iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); 1195 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 1196 goto out; 1197 1198 /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks 1199 * to trigger seq_file corner cases. The expected output is much 1200 * longer than 1kB, so the while loop will terminate. 
1201 */ 1202 len = 0; 1203 while (len < CMP_BUFFER_SIZE) { 1204 err = read_fd_into_buffer(iter_fd, task_vma_output + len, 1205 min(read_size, CMP_BUFFER_SIZE - len)); 1206 if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n")) 1207 goto out; 1208 len += err; 1209 } 1210 1211 /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */ 1212 snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid); 1213 proc_maps_fd = open(maps_path, O_RDONLY); 1214 if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n")) 1215 goto out; 1216 err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE); 1217 if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n")) 1218 goto out; 1219 1220 /* strip and compare the first line of the two files */ 1221 str_strip_first_line(task_vma_output); 1222 str_strip_first_line(proc_maps_output); 1223 1224 CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output", 1225 "found mismatch\n"); 1226 out: 1227 close(proc_maps_fd); 1228 close(iter_fd); 1229 bpf_iter_task_vma__destroy(skel); 1230 } 1231 1232 void test_bpf_iter(void) 1233 { 1234 if (test__start_subtest("btf_id_or_null")) 1235 test_btf_id_or_null(); 1236 if (test__start_subtest("ipv6_route")) 1237 test_ipv6_route(); 1238 if (test__start_subtest("netlink")) 1239 test_netlink(); 1240 if (test__start_subtest("bpf_map")) 1241 test_bpf_map(); 1242 if (test__start_subtest("task")) 1243 test_task(); 1244 if (test__start_subtest("task_stack")) 1245 test_task_stack(); 1246 if (test__start_subtest("task_file")) 1247 test_task_file(); 1248 if (test__start_subtest("task_vma")) 1249 test_task_vma(); 1250 if (test__start_subtest("task_btf")) 1251 test_task_btf(); 1252 if (test__start_subtest("tcp4")) 1253 test_tcp4(); 1254 if (test__start_subtest("tcp6")) 1255 test_tcp6(); 1256 if (test__start_subtest("udp4")) 1257 test_udp4(); 1258 if (test__start_subtest("udp6")) 1259 test_udp6(); 1260 if (test__start_subtest("anon")) 1261 test_anon_iter(false); 1262 if 
(test__start_subtest("anon-read-one-char")) 1263 test_anon_iter(true); 1264 if (test__start_subtest("file")) 1265 test_file_iter(); 1266 if (test__start_subtest("overflow")) 1267 test_overflow(false, false); 1268 if (test__start_subtest("overflow-e2big")) 1269 test_overflow(true, false); 1270 if (test__start_subtest("prog-ret-1")) 1271 test_overflow(false, true); 1272 if (test__start_subtest("bpf_hash_map")) 1273 test_bpf_hash_map(); 1274 if (test__start_subtest("bpf_percpu_hash_map")) 1275 test_bpf_percpu_hash_map(); 1276 if (test__start_subtest("bpf_array_map")) 1277 test_bpf_array_map(); 1278 if (test__start_subtest("bpf_percpu_array_map")) 1279 test_bpf_percpu_array_map(); 1280 if (test__start_subtest("bpf_sk_storage_map")) 1281 test_bpf_sk_storage_map(); 1282 if (test__start_subtest("bpf_sk_storage_delete")) 1283 test_bpf_sk_storage_delete(); 1284 if (test__start_subtest("bpf_sk_storage_get")) 1285 test_bpf_sk_storage_get(); 1286 if (test__start_subtest("rdonly-buf-out-of-bound")) 1287 test_rdonly_buf_out_of_bound(); 1288 if (test__start_subtest("buf-neg-offset")) 1289 test_buf_neg_offset(); 1290 } 1291