1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2020 Facebook */ 3 #include <test_progs.h> 4 #include "bpf_iter_ipv6_route.skel.h" 5 #include "bpf_iter_netlink.skel.h" 6 #include "bpf_iter_bpf_map.skel.h" 7 #include "bpf_iter_task.skel.h" 8 #include "bpf_iter_task_stack.skel.h" 9 #include "bpf_iter_task_file.skel.h" 10 #include "bpf_iter_task_vma.skel.h" 11 #include "bpf_iter_task_btf.skel.h" 12 #include "bpf_iter_tcp4.skel.h" 13 #include "bpf_iter_tcp6.skel.h" 14 #include "bpf_iter_udp4.skel.h" 15 #include "bpf_iter_udp6.skel.h" 16 #include "bpf_iter_unix.skel.h" 17 #include "bpf_iter_test_kern1.skel.h" 18 #include "bpf_iter_test_kern2.skel.h" 19 #include "bpf_iter_test_kern3.skel.h" 20 #include "bpf_iter_test_kern4.skel.h" 21 #include "bpf_iter_bpf_hash_map.skel.h" 22 #include "bpf_iter_bpf_percpu_hash_map.skel.h" 23 #include "bpf_iter_bpf_array_map.skel.h" 24 #include "bpf_iter_bpf_percpu_array_map.skel.h" 25 #include "bpf_iter_bpf_sk_storage_helpers.skel.h" 26 #include "bpf_iter_bpf_sk_storage_map.skel.h" 27 #include "bpf_iter_test_kern5.skel.h" 28 #include "bpf_iter_test_kern6.skel.h" 29 30 static int duration; 31 32 static void test_btf_id_or_null(void) 33 { 34 struct bpf_iter_test_kern3 *skel; 35 36 skel = bpf_iter_test_kern3__open_and_load(); 37 if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", 38 "skeleton open_and_load unexpectedly succeeded\n")) { 39 bpf_iter_test_kern3__destroy(skel); 40 return; 41 } 42 } 43 44 static void do_dummy_read(struct bpf_program *prog) 45 { 46 struct bpf_link *link; 47 char buf[16] = {}; 48 int iter_fd, len; 49 50 link = bpf_program__attach_iter(prog, NULL); 51 if (!ASSERT_OK_PTR(link, "attach_iter")) 52 return; 53 54 iter_fd = bpf_iter_create(bpf_link__fd(link)); 55 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 56 goto free_link; 57 58 /* not check contents, but ensure read() ends without error */ 59 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 60 ; 61 CHECK(len < 0, "read", "read failed: 
%s\n", strerror(errno)); 62 63 close(iter_fd); 64 65 free_link: 66 bpf_link__destroy(link); 67 } 68 69 static int read_fd_into_buffer(int fd, char *buf, int size) 70 { 71 int bufleft = size; 72 int len; 73 74 do { 75 len = read(fd, buf, bufleft); 76 if (len > 0) { 77 buf += len; 78 bufleft -= len; 79 } 80 } while (len > 0); 81 82 return len < 0 ? len : size - bufleft; 83 } 84 85 static void test_ipv6_route(void) 86 { 87 struct bpf_iter_ipv6_route *skel; 88 89 skel = bpf_iter_ipv6_route__open_and_load(); 90 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", 91 "skeleton open_and_load failed\n")) 92 return; 93 94 do_dummy_read(skel->progs.dump_ipv6_route); 95 96 bpf_iter_ipv6_route__destroy(skel); 97 } 98 99 static void test_netlink(void) 100 { 101 struct bpf_iter_netlink *skel; 102 103 skel = bpf_iter_netlink__open_and_load(); 104 if (CHECK(!skel, "bpf_iter_netlink__open_and_load", 105 "skeleton open_and_load failed\n")) 106 return; 107 108 do_dummy_read(skel->progs.dump_netlink); 109 110 bpf_iter_netlink__destroy(skel); 111 } 112 113 static void test_bpf_map(void) 114 { 115 struct bpf_iter_bpf_map *skel; 116 117 skel = bpf_iter_bpf_map__open_and_load(); 118 if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", 119 "skeleton open_and_load failed\n")) 120 return; 121 122 do_dummy_read(skel->progs.dump_bpf_map); 123 124 bpf_iter_bpf_map__destroy(skel); 125 } 126 127 static void test_task(void) 128 { 129 struct bpf_iter_task *skel; 130 131 skel = bpf_iter_task__open_and_load(); 132 if (CHECK(!skel, "bpf_iter_task__open_and_load", 133 "skeleton open_and_load failed\n")) 134 return; 135 136 do_dummy_read(skel->progs.dump_task); 137 138 bpf_iter_task__destroy(skel); 139 } 140 141 static void test_task_stack(void) 142 { 143 struct bpf_iter_task_stack *skel; 144 145 skel = bpf_iter_task_stack__open_and_load(); 146 if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", 147 "skeleton open_and_load failed\n")) 148 return; 149 150 do_dummy_read(skel->progs.dump_task_stack); 
151 do_dummy_read(skel->progs.get_task_user_stacks); 152 153 bpf_iter_task_stack__destroy(skel); 154 } 155 156 static void *do_nothing(void *arg) 157 { 158 pthread_exit(arg); 159 } 160 161 static void test_task_file(void) 162 { 163 struct bpf_iter_task_file *skel; 164 pthread_t thread_id; 165 void *ret; 166 167 skel = bpf_iter_task_file__open_and_load(); 168 if (CHECK(!skel, "bpf_iter_task_file__open_and_load", 169 "skeleton open_and_load failed\n")) 170 return; 171 172 skel->bss->tgid = getpid(); 173 174 if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL), 175 "pthread_create", "pthread_create failed\n")) 176 goto done; 177 178 do_dummy_read(skel->progs.dump_task_file); 179 180 if (CHECK(pthread_join(thread_id, &ret) || ret != NULL, 181 "pthread_join", "pthread_join failed\n")) 182 goto done; 183 184 CHECK(skel->bss->count != 0, "check_count", 185 "invalid non pthread file visit count %d\n", skel->bss->count); 186 187 done: 188 bpf_iter_task_file__destroy(skel); 189 } 190 191 #define TASKBUFSZ 32768 192 193 static char taskbuf[TASKBUFSZ]; 194 195 static int do_btf_read(struct bpf_iter_task_btf *skel) 196 { 197 struct bpf_program *prog = skel->progs.dump_task_struct; 198 struct bpf_iter_task_btf__bss *bss = skel->bss; 199 int iter_fd = -1, err; 200 struct bpf_link *link; 201 char *buf = taskbuf; 202 int ret = 0; 203 204 link = bpf_program__attach_iter(prog, NULL); 205 if (!ASSERT_OK_PTR(link, "attach_iter")) 206 return ret; 207 208 iter_fd = bpf_iter_create(bpf_link__fd(link)); 209 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 210 goto free_link; 211 212 err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); 213 if (bss->skip) { 214 printf("%s:SKIP:no __builtin_btf_type_id\n", __func__); 215 ret = 1; 216 test__skip(); 217 goto free_link; 218 } 219 220 if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) 221 goto free_link; 222 223 CHECK(strstr(taskbuf, "(struct task_struct)") == NULL, 224 "check for btf representation of 
task_struct in iter data", 225 "struct task_struct not found"); 226 free_link: 227 if (iter_fd > 0) 228 close(iter_fd); 229 bpf_link__destroy(link); 230 return ret; 231 } 232 233 static void test_task_btf(void) 234 { 235 struct bpf_iter_task_btf__bss *bss; 236 struct bpf_iter_task_btf *skel; 237 int ret; 238 239 skel = bpf_iter_task_btf__open_and_load(); 240 if (CHECK(!skel, "bpf_iter_task_btf__open_and_load", 241 "skeleton open_and_load failed\n")) 242 return; 243 244 bss = skel->bss; 245 246 ret = do_btf_read(skel); 247 if (ret) 248 goto cleanup; 249 250 if (CHECK(bss->tasks == 0, "check if iterated over tasks", 251 "no task iteration, did BPF program run?\n")) 252 goto cleanup; 253 254 CHECK(bss->seq_err != 0, "check for unexpected err", 255 "bpf_seq_printf_btf returned %ld", bss->seq_err); 256 257 cleanup: 258 bpf_iter_task_btf__destroy(skel); 259 } 260 261 static void test_tcp4(void) 262 { 263 struct bpf_iter_tcp4 *skel; 264 265 skel = bpf_iter_tcp4__open_and_load(); 266 if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", 267 "skeleton open_and_load failed\n")) 268 return; 269 270 do_dummy_read(skel->progs.dump_tcp4); 271 272 bpf_iter_tcp4__destroy(skel); 273 } 274 275 static void test_tcp6(void) 276 { 277 struct bpf_iter_tcp6 *skel; 278 279 skel = bpf_iter_tcp6__open_and_load(); 280 if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", 281 "skeleton open_and_load failed\n")) 282 return; 283 284 do_dummy_read(skel->progs.dump_tcp6); 285 286 bpf_iter_tcp6__destroy(skel); 287 } 288 289 static void test_udp4(void) 290 { 291 struct bpf_iter_udp4 *skel; 292 293 skel = bpf_iter_udp4__open_and_load(); 294 if (CHECK(!skel, "bpf_iter_udp4__open_and_load", 295 "skeleton open_and_load failed\n")) 296 return; 297 298 do_dummy_read(skel->progs.dump_udp4); 299 300 bpf_iter_udp4__destroy(skel); 301 } 302 303 static void test_udp6(void) 304 { 305 struct bpf_iter_udp6 *skel; 306 307 skel = bpf_iter_udp6__open_and_load(); 308 if (CHECK(!skel, "bpf_iter_udp6__open_and_load", 309 
"skeleton open_and_load failed\n")) 310 return; 311 312 do_dummy_read(skel->progs.dump_udp6); 313 314 bpf_iter_udp6__destroy(skel); 315 } 316 317 static void test_unix(void) 318 { 319 struct bpf_iter_unix *skel; 320 321 skel = bpf_iter_unix__open_and_load(); 322 if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load")) 323 return; 324 325 do_dummy_read(skel->progs.dump_unix); 326 327 bpf_iter_unix__destroy(skel); 328 } 329 330 /* The expected string is less than 16 bytes */ 331 static int do_read_with_fd(int iter_fd, const char *expected, 332 bool read_one_char) 333 { 334 int err = -1, len, read_buf_len, start; 335 char buf[16] = {}; 336 337 read_buf_len = read_one_char ? 1 : 16; 338 start = 0; 339 while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { 340 start += len; 341 if (CHECK(start >= 16, "read", "read len %d\n", len)) 342 return -1; 343 read_buf_len = read_one_char ? 1 : 16 - start; 344 } 345 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 346 return -1; 347 348 err = strcmp(buf, expected); 349 if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", 350 buf, expected)) 351 return -1; 352 353 return 0; 354 } 355 356 static void test_anon_iter(bool read_one_char) 357 { 358 struct bpf_iter_test_kern1 *skel; 359 struct bpf_link *link; 360 int iter_fd, err; 361 362 skel = bpf_iter_test_kern1__open_and_load(); 363 if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", 364 "skeleton open_and_load failed\n")) 365 return; 366 367 err = bpf_iter_test_kern1__attach(skel); 368 if (CHECK(err, "bpf_iter_test_kern1__attach", 369 "skeleton attach failed\n")) { 370 goto out; 371 } 372 373 link = skel->links.dump_task; 374 iter_fd = bpf_iter_create(bpf_link__fd(link)); 375 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 376 goto out; 377 378 do_read_with_fd(iter_fd, "abcd", read_one_char); 379 close(iter_fd); 380 381 out: 382 bpf_iter_test_kern1__destroy(skel); 383 } 384 385 static int do_read(const char *path, const 
char *expected) 386 { 387 int err, iter_fd; 388 389 iter_fd = open(path, O_RDONLY); 390 if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", 391 path, strerror(errno))) 392 return -1; 393 394 err = do_read_with_fd(iter_fd, expected, false); 395 close(iter_fd); 396 return err; 397 } 398 399 static void test_file_iter(void) 400 { 401 const char *path = "/sys/fs/bpf/bpf_iter_test1"; 402 struct bpf_iter_test_kern1 *skel1; 403 struct bpf_iter_test_kern2 *skel2; 404 struct bpf_link *link; 405 int err; 406 407 skel1 = bpf_iter_test_kern1__open_and_load(); 408 if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", 409 "skeleton open_and_load failed\n")) 410 return; 411 412 link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); 413 if (!ASSERT_OK_PTR(link, "attach_iter")) 414 goto out; 415 416 /* unlink this path if it exists. */ 417 unlink(path); 418 419 err = bpf_link__pin(link, path); 420 if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) 421 goto free_link; 422 423 err = do_read(path, "abcd"); 424 if (err) 425 goto unlink_path; 426 427 /* file based iterator seems working fine. Let us a link update 428 * of the underlying link and `cat` the iterator again, its content 429 * should change. 
430 */ 431 skel2 = bpf_iter_test_kern2__open_and_load(); 432 if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", 433 "skeleton open_and_load failed\n")) 434 goto unlink_path; 435 436 err = bpf_link__update_program(link, skel2->progs.dump_task); 437 if (CHECK(err, "update_prog", "update_prog failed\n")) 438 goto destroy_skel2; 439 440 do_read(path, "ABCD"); 441 442 destroy_skel2: 443 bpf_iter_test_kern2__destroy(skel2); 444 unlink_path: 445 unlink(path); 446 free_link: 447 bpf_link__destroy(link); 448 out: 449 bpf_iter_test_kern1__destroy(skel1); 450 } 451 452 static void test_overflow(bool test_e2big_overflow, bool ret1) 453 { 454 __u32 map_info_len, total_read_len, expected_read_len; 455 int err, iter_fd, map1_fd, map2_fd, len; 456 struct bpf_map_info map_info = {}; 457 struct bpf_iter_test_kern4 *skel; 458 struct bpf_link *link; 459 __u32 iter_size; 460 char *buf; 461 462 skel = bpf_iter_test_kern4__open(); 463 if (CHECK(!skel, "bpf_iter_test_kern4__open", 464 "skeleton open failed\n")) 465 return; 466 467 /* create two maps: bpf program will only do bpf_seq_write 468 * for these two maps. The goal is one map output almost 469 * fills seq_file buffer and then the other will trigger 470 * overflow and needs restart. 471 */ 472 map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 473 if (CHECK(map1_fd < 0, "bpf_create_map", 474 "map_creation failed: %s\n", strerror(errno))) 475 goto out; 476 map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 477 if (CHECK(map2_fd < 0, "bpf_create_map", 478 "map_creation failed: %s\n", strerror(errno))) 479 goto free_map1; 480 481 /* bpf_seq_printf kernel buffer is 8 pages, so one map 482 * bpf_seq_write will mostly fill it, and the other map 483 * will partially fill and then trigger overflow and need 484 * bpf_seq_read restart. 
485 */ 486 iter_size = sysconf(_SC_PAGE_SIZE) << 3; 487 488 if (test_e2big_overflow) { 489 skel->rodata->print_len = (iter_size + 8) / 8; 490 expected_read_len = 2 * (iter_size + 8); 491 } else if (!ret1) { 492 skel->rodata->print_len = (iter_size - 8) / 8; 493 expected_read_len = 2 * (iter_size - 8); 494 } else { 495 skel->rodata->print_len = 1; 496 expected_read_len = 2 * 8; 497 } 498 skel->rodata->ret1 = ret1; 499 500 if (CHECK(bpf_iter_test_kern4__load(skel), 501 "bpf_iter_test_kern4__load", "skeleton load failed\n")) 502 goto free_map2; 503 504 /* setup filtering map_id in bpf program */ 505 map_info_len = sizeof(map_info); 506 err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); 507 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 508 strerror(errno))) 509 goto free_map2; 510 skel->bss->map1_id = map_info.id; 511 512 err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); 513 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 514 strerror(errno))) 515 goto free_map2; 516 skel->bss->map2_id = map_info.id; 517 518 link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); 519 if (!ASSERT_OK_PTR(link, "attach_iter")) 520 goto free_map2; 521 522 iter_fd = bpf_iter_create(bpf_link__fd(link)); 523 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 524 goto free_link; 525 526 buf = malloc(expected_read_len); 527 if (!buf) 528 goto close_iter; 529 530 /* do read */ 531 total_read_len = 0; 532 if (test_e2big_overflow) { 533 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 534 total_read_len += len; 535 536 CHECK(len != -1 || errno != E2BIG, "read", 537 "expected ret -1, errno E2BIG, but get ret %d, error %s\n", 538 len, strerror(errno)); 539 goto free_buf; 540 } else if (!ret1) { 541 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 542 total_read_len += len; 543 544 if (CHECK(len < 0, "read", "read failed: %s\n", 545 strerror(errno))) 546 goto free_buf; 547 } else { 548 do { 549 len = 
read(iter_fd, buf, expected_read_len); 550 if (len > 0) 551 total_read_len += len; 552 } while (len > 0 || len == -EAGAIN); 553 554 if (CHECK(len < 0, "read", "read failed: %s\n", 555 strerror(errno))) 556 goto free_buf; 557 } 558 559 if (CHECK(total_read_len != expected_read_len, "read", 560 "total len %u, expected len %u\n", total_read_len, 561 expected_read_len)) 562 goto free_buf; 563 564 if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", 565 "expected 1 actual %d\n", skel->bss->map1_accessed)) 566 goto free_buf; 567 568 if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", 569 "expected 2 actual %d\n", skel->bss->map2_accessed)) 570 goto free_buf; 571 572 CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, 573 "map2_seqnum", "two different seqnum %lld %lld\n", 574 skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); 575 576 free_buf: 577 free(buf); 578 close_iter: 579 close(iter_fd); 580 free_link: 581 bpf_link__destroy(link); 582 free_map2: 583 close(map2_fd); 584 free_map1: 585 close(map1_fd); 586 out: 587 bpf_iter_test_kern4__destroy(skel); 588 } 589 590 static void test_bpf_hash_map(void) 591 { 592 __u32 expected_key_a = 0, expected_key_b = 0; 593 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 594 struct bpf_iter_bpf_hash_map *skel; 595 int err, i, len, map_fd, iter_fd; 596 union bpf_iter_link_info linfo; 597 __u64 val, expected_val = 0; 598 struct bpf_link *link; 599 struct key_t { 600 int a; 601 int b; 602 int c; 603 } key; 604 char buf[64]; 605 606 skel = bpf_iter_bpf_hash_map__open(); 607 if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", 608 "skeleton open failed\n")) 609 return; 610 611 skel->bss->in_test_mode = true; 612 613 err = bpf_iter_bpf_hash_map__load(skel); 614 if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", 615 "skeleton load failed\n")) 616 goto out; 617 618 /* iterator with hashmap2 and hashmap3 should fail */ 619 memset(&linfo, 0, sizeof(linfo)); 620 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2); 621 opts.link_info = 
&linfo; 622 opts.link_info_len = sizeof(linfo); 623 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 624 if (!ASSERT_ERR_PTR(link, "attach_iter")) 625 goto out; 626 627 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3); 628 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 629 if (!ASSERT_ERR_PTR(link, "attach_iter")) 630 goto out; 631 632 /* hashmap1 should be good, update map values here */ 633 map_fd = bpf_map__fd(skel->maps.hashmap1); 634 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 635 key.a = i + 1; 636 key.b = i + 2; 637 key.c = i + 3; 638 val = i + 4; 639 expected_key_a += key.a; 640 expected_key_b += key.b; 641 expected_val += val; 642 643 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); 644 if (CHECK(err, "map_update", "map_update failed\n")) 645 goto out; 646 } 647 648 linfo.map.map_fd = map_fd; 649 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 650 if (!ASSERT_OK_PTR(link, "attach_iter")) 651 goto out; 652 653 iter_fd = bpf_iter_create(bpf_link__fd(link)); 654 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 655 goto free_link; 656 657 /* do some tests */ 658 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 659 ; 660 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 661 goto close_iter; 662 663 /* test results */ 664 if (CHECK(skel->bss->key_sum_a != expected_key_a, 665 "key_sum_a", "got %u expected %u\n", 666 skel->bss->key_sum_a, expected_key_a)) 667 goto close_iter; 668 if (CHECK(skel->bss->key_sum_b != expected_key_b, 669 "key_sum_b", "got %u expected %u\n", 670 skel->bss->key_sum_b, expected_key_b)) 671 goto close_iter; 672 if (CHECK(skel->bss->val_sum != expected_val, 673 "val_sum", "got %llu expected %llu\n", 674 skel->bss->val_sum, expected_val)) 675 goto close_iter; 676 677 close_iter: 678 close(iter_fd); 679 free_link: 680 bpf_link__destroy(link); 681 out: 682 bpf_iter_bpf_hash_map__destroy(skel); 683 } 684 685 
static void test_bpf_percpu_hash_map(void) 686 { 687 __u32 expected_key_a = 0, expected_key_b = 0; 688 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 689 struct bpf_iter_bpf_percpu_hash_map *skel; 690 int err, i, j, len, map_fd, iter_fd; 691 union bpf_iter_link_info linfo; 692 __u32 expected_val = 0; 693 struct bpf_link *link; 694 struct key_t { 695 int a; 696 int b; 697 int c; 698 } key; 699 char buf[64]; 700 void *val; 701 702 val = malloc(8 * bpf_num_possible_cpus()); 703 704 skel = bpf_iter_bpf_percpu_hash_map__open(); 705 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", 706 "skeleton open failed\n")) 707 return; 708 709 skel->rodata->num_cpus = bpf_num_possible_cpus(); 710 711 err = bpf_iter_bpf_percpu_hash_map__load(skel); 712 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", 713 "skeleton load failed\n")) 714 goto out; 715 716 /* update map values here */ 717 map_fd = bpf_map__fd(skel->maps.hashmap1); 718 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 719 key.a = i + 1; 720 key.b = i + 2; 721 key.c = i + 3; 722 expected_key_a += key.a; 723 expected_key_b += key.b; 724 725 for (j = 0; j < bpf_num_possible_cpus(); j++) { 726 *(__u32 *)(val + j * 8) = i + j; 727 expected_val += i + j; 728 } 729 730 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); 731 if (CHECK(err, "map_update", "map_update failed\n")) 732 goto out; 733 } 734 735 memset(&linfo, 0, sizeof(linfo)); 736 linfo.map.map_fd = map_fd; 737 opts.link_info = &linfo; 738 opts.link_info_len = sizeof(linfo); 739 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); 740 if (!ASSERT_OK_PTR(link, "attach_iter")) 741 goto out; 742 743 iter_fd = bpf_iter_create(bpf_link__fd(link)); 744 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 745 goto free_link; 746 747 /* do some tests */ 748 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 749 ; 750 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 751 goto close_iter; 752 753 
/* test results */ 754 if (CHECK(skel->bss->key_sum_a != expected_key_a, 755 "key_sum_a", "got %u expected %u\n", 756 skel->bss->key_sum_a, expected_key_a)) 757 goto close_iter; 758 if (CHECK(skel->bss->key_sum_b != expected_key_b, 759 "key_sum_b", "got %u expected %u\n", 760 skel->bss->key_sum_b, expected_key_b)) 761 goto close_iter; 762 if (CHECK(skel->bss->val_sum != expected_val, 763 "val_sum", "got %u expected %u\n", 764 skel->bss->val_sum, expected_val)) 765 goto close_iter; 766 767 close_iter: 768 close(iter_fd); 769 free_link: 770 bpf_link__destroy(link); 771 out: 772 bpf_iter_bpf_percpu_hash_map__destroy(skel); 773 } 774 775 static void test_bpf_array_map(void) 776 { 777 __u64 val, expected_val = 0, res_first_val, first_val = 0; 778 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 779 __u32 expected_key = 0, res_first_key; 780 struct bpf_iter_bpf_array_map *skel; 781 union bpf_iter_link_info linfo; 782 int err, i, map_fd, iter_fd; 783 struct bpf_link *link; 784 char buf[64] = {}; 785 int len, start; 786 787 skel = bpf_iter_bpf_array_map__open_and_load(); 788 if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", 789 "skeleton open_and_load failed\n")) 790 return; 791 792 map_fd = bpf_map__fd(skel->maps.arraymap1); 793 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 794 val = i + 4; 795 expected_key += i; 796 expected_val += val; 797 798 if (i == 0) 799 first_val = val; 800 801 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); 802 if (CHECK(err, "map_update", "map_update failed\n")) 803 goto out; 804 } 805 806 memset(&linfo, 0, sizeof(linfo)); 807 linfo.map.map_fd = map_fd; 808 opts.link_info = &linfo; 809 opts.link_info_len = sizeof(linfo); 810 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); 811 if (!ASSERT_OK_PTR(link, "attach_iter")) 812 goto out; 813 814 iter_fd = bpf_iter_create(bpf_link__fd(link)); 815 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 816 goto free_link; 817 818 /* do some 
tests */ 819 start = 0; 820 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) 821 start += len; 822 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 823 goto close_iter; 824 825 /* test results */ 826 res_first_key = *(__u32 *)buf; 827 res_first_val = *(__u64 *)(buf + sizeof(__u32)); 828 if (CHECK(res_first_key != 0 || res_first_val != first_val, 829 "bpf_seq_write", 830 "seq_write failure: first key %u vs expected 0, " 831 " first value %llu vs expected %llu\n", 832 res_first_key, res_first_val, first_val)) 833 goto close_iter; 834 835 if (CHECK(skel->bss->key_sum != expected_key, 836 "key_sum", "got %u expected %u\n", 837 skel->bss->key_sum, expected_key)) 838 goto close_iter; 839 if (CHECK(skel->bss->val_sum != expected_val, 840 "val_sum", "got %llu expected %llu\n", 841 skel->bss->val_sum, expected_val)) 842 goto close_iter; 843 844 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 845 err = bpf_map_lookup_elem(map_fd, &i, &val); 846 if (CHECK(err, "map_lookup", "map_lookup failed\n")) 847 goto out; 848 if (CHECK(i != val, "invalid_val", 849 "got value %llu expected %u\n", val, i)) 850 goto out; 851 } 852 853 close_iter: 854 close(iter_fd); 855 free_link: 856 bpf_link__destroy(link); 857 out: 858 bpf_iter_bpf_array_map__destroy(skel); 859 } 860 861 static void test_bpf_percpu_array_map(void) 862 { 863 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 864 struct bpf_iter_bpf_percpu_array_map *skel; 865 __u32 expected_key = 0, expected_val = 0; 866 union bpf_iter_link_info linfo; 867 int err, i, j, map_fd, iter_fd; 868 struct bpf_link *link; 869 char buf[64]; 870 void *val; 871 int len; 872 873 val = malloc(8 * bpf_num_possible_cpus()); 874 875 skel = bpf_iter_bpf_percpu_array_map__open(); 876 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", 877 "skeleton open failed\n")) 878 return; 879 880 skel->rodata->num_cpus = bpf_num_possible_cpus(); 881 882 err = bpf_iter_bpf_percpu_array_map__load(skel); 883 if 
(CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", 884 "skeleton load failed\n")) 885 goto out; 886 887 /* update map values here */ 888 map_fd = bpf_map__fd(skel->maps.arraymap1); 889 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 890 expected_key += i; 891 892 for (j = 0; j < bpf_num_possible_cpus(); j++) { 893 *(__u32 *)(val + j * 8) = i + j; 894 expected_val += i + j; 895 } 896 897 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); 898 if (CHECK(err, "map_update", "map_update failed\n")) 899 goto out; 900 } 901 902 memset(&linfo, 0, sizeof(linfo)); 903 linfo.map.map_fd = map_fd; 904 opts.link_info = &linfo; 905 opts.link_info_len = sizeof(linfo); 906 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); 907 if (!ASSERT_OK_PTR(link, "attach_iter")) 908 goto out; 909 910 iter_fd = bpf_iter_create(bpf_link__fd(link)); 911 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 912 goto free_link; 913 914 /* do some tests */ 915 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 916 ; 917 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 918 goto close_iter; 919 920 /* test results */ 921 if (CHECK(skel->bss->key_sum != expected_key, 922 "key_sum", "got %u expected %u\n", 923 skel->bss->key_sum, expected_key)) 924 goto close_iter; 925 if (CHECK(skel->bss->val_sum != expected_val, 926 "val_sum", "got %u expected %u\n", 927 skel->bss->val_sum, expected_val)) 928 goto close_iter; 929 930 close_iter: 931 close(iter_fd); 932 free_link: 933 bpf_link__destroy(link); 934 out: 935 bpf_iter_bpf_percpu_array_map__destroy(skel); 936 } 937 938 /* An iterator program deletes all local storage in a map. 
*/ 939 static void test_bpf_sk_storage_delete(void) 940 { 941 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 942 struct bpf_iter_bpf_sk_storage_helpers *skel; 943 union bpf_iter_link_info linfo; 944 int err, len, map_fd, iter_fd; 945 struct bpf_link *link; 946 int sock_fd = -1; 947 __u32 val = 42; 948 char buf[64]; 949 950 skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); 951 if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", 952 "skeleton open_and_load failed\n")) 953 return; 954 955 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 956 957 sock_fd = socket(AF_INET6, SOCK_STREAM, 0); 958 if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) 959 goto out; 960 err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); 961 if (CHECK(err, "map_update", "map_update failed\n")) 962 goto out; 963 964 memset(&linfo, 0, sizeof(linfo)); 965 linfo.map.map_fd = map_fd; 966 opts.link_info = &linfo; 967 opts.link_info_len = sizeof(linfo); 968 link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map, 969 &opts); 970 if (!ASSERT_OK_PTR(link, "attach_iter")) 971 goto out; 972 973 iter_fd = bpf_iter_create(bpf_link__fd(link)); 974 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 975 goto free_link; 976 977 /* do some tests */ 978 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 979 ; 980 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 981 goto close_iter; 982 983 /* test results */ 984 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 985 if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", 986 "map value wasn't deleted (err=%d, errno=%d)\n", err, errno)) 987 goto close_iter; 988 989 close_iter: 990 close(iter_fd); 991 free_link: 992 bpf_link__destroy(link); 993 out: 994 if (sock_fd >= 0) 995 close(sock_fd); 996 bpf_iter_bpf_sk_storage_helpers__destroy(skel); 997 } 998 999 /* This creates a socket and its local storage. 
It then runs a task_iter BPF 1000 * program that replaces the existing socket local storage with the tgid of the 1001 * only task owning a file descriptor to this socket, this process, prog_tests. 1002 * It then runs a tcp socket iterator that negates the value in the existing 1003 * socket local storage, the test verifies that the resulting value is -pid. 1004 */ 1005 static void test_bpf_sk_storage_get(void) 1006 { 1007 struct bpf_iter_bpf_sk_storage_helpers *skel; 1008 int err, map_fd, val = -1; 1009 int sock_fd = -1; 1010 1011 skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); 1012 if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", 1013 "skeleton open_and_load failed\n")) 1014 return; 1015 1016 sock_fd = socket(AF_INET6, SOCK_STREAM, 0); 1017 if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) 1018 goto out; 1019 1020 err = listen(sock_fd, 1); 1021 if (CHECK(err != 0, "listen", "errno: %d\n", errno)) 1022 goto close_socket; 1023 1024 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 1025 1026 err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); 1027 if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n")) 1028 goto close_socket; 1029 1030 do_dummy_read(skel->progs.fill_socket_owner); 1031 1032 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 1033 if (CHECK(err || val != getpid(), "bpf_map_lookup_elem", 1034 "map value wasn't set correctly (expected %d, got %d, err=%d)\n", 1035 getpid(), val, err)) 1036 goto close_socket; 1037 1038 do_dummy_read(skel->progs.negate_socket_local_storage); 1039 1040 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 1041 CHECK(err || val != -getpid(), "bpf_map_lookup_elem", 1042 "map value wasn't set correctly (expected %d, got %d, err=%d)\n", 1043 -getpid(), val, err); 1044 1045 close_socket: 1046 close(sock_fd); 1047 out: 1048 bpf_iter_bpf_sk_storage_helpers__destroy(skel); 1049 } 1050 1051 static void test_bpf_sk_storage_map(void) 1052 { 1053 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);

	/* Create a few IPv6 sockets and seed the sk_storage map with a
	 * distinct value (i + 1) per socket; expected_val accumulates the
	 * sum the iterator program should compute in skel->bss->val_sum.
	 */
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	/* Bind the iterator to the sk_storage map via link_info. */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* Drain the iterator: contents are not checked here, only that
	 * read() terminates without error.
	 */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* Check the aggregates the iterator program left in .bss. */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	/* Only close sockets that were actually opened (-1 sentinel). */
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

/* Negative test: per the test name, the dump program in
 * bpf_iter_test_kern5 reads out of bounds of the read-only buffer,
 * so attaching it to hashmap1 is expected to fail.
 */
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	/* Attach must fail; clean up only if it unexpectedly succeeded. */
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

/* Negative test: bpf_iter_test_kern6 is expected to be rejected at
 * load time (presumably a negative buffer offset, per the test name),
 * so open_and_load succeeding is the failure case.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

/* NOTE: evaluates both arguments twice; use side-effect-free operands. */
#define min(a, b) ((a) < (b) ? (a) : (b))

/* Compare the bpf_iter task_vma output for our own task against
 * /proc/<pid>/maps: the first line of both (whitespace-stripped)
 * must match.
 */
static void test_task_vma(void)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n"))
		return;

	/* Restrict the iterator program to this process's VMAs. */
	skel->bss->pid = getpid();

	err = bpf_iter_task_vma__load(skel);
	if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		/* NULL the stale error pointer so skeleton destroy is safe */
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases. The expected output is much
	 * longer than 1kB, so the while loop will terminate.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  min(read_size, CMP_BUFFER_SIZE - len));
		if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
			goto out;
		len += err;
	}

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output",
	      "found mismatch\n");
out:
	/* fds are initialized to -1; close(-1) fails harmlessly (EBADF) */
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

/* Test-suite entry point: each bpf_iter scenario runs as its own
 * subtest so failures are reported independently.
 */
void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}