1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2020 Facebook */ 3 #include <test_progs.h> 4 #include "bpf_iter_ipv6_route.skel.h" 5 #include "bpf_iter_netlink.skel.h" 6 #include "bpf_iter_bpf_map.skel.h" 7 #include "bpf_iter_task.skel.h" 8 #include "bpf_iter_task_stack.skel.h" 9 #include "bpf_iter_task_file.skel.h" 10 #include "bpf_iter_task_vma.skel.h" 11 #include "bpf_iter_task_btf.skel.h" 12 #include "bpf_iter_tcp4.skel.h" 13 #include "bpf_iter_tcp6.skel.h" 14 #include "bpf_iter_udp4.skel.h" 15 #include "bpf_iter_udp6.skel.h" 16 #include "bpf_iter_unix.skel.h" 17 #include "bpf_iter_test_kern1.skel.h" 18 #include "bpf_iter_test_kern2.skel.h" 19 #include "bpf_iter_test_kern3.skel.h" 20 #include "bpf_iter_test_kern4.skel.h" 21 #include "bpf_iter_bpf_hash_map.skel.h" 22 #include "bpf_iter_bpf_percpu_hash_map.skel.h" 23 #include "bpf_iter_bpf_array_map.skel.h" 24 #include "bpf_iter_bpf_percpu_array_map.skel.h" 25 #include "bpf_iter_bpf_sk_storage_helpers.skel.h" 26 #include "bpf_iter_bpf_sk_storage_map.skel.h" 27 #include "bpf_iter_test_kern5.skel.h" 28 #include "bpf_iter_test_kern6.skel.h" 29 #include "bpf_iter_bpf_link.skel.h" 30 31 static int duration; 32 33 static void test_btf_id_or_null(void) 34 { 35 struct bpf_iter_test_kern3 *skel; 36 37 skel = bpf_iter_test_kern3__open_and_load(); 38 if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { 39 bpf_iter_test_kern3__destroy(skel); 40 return; 41 } 42 } 43 44 static void do_dummy_read(struct bpf_program *prog) 45 { 46 struct bpf_link *link; 47 char buf[16] = {}; 48 int iter_fd, len; 49 50 link = bpf_program__attach_iter(prog, NULL); 51 if (!ASSERT_OK_PTR(link, "attach_iter")) 52 return; 53 54 iter_fd = bpf_iter_create(bpf_link__fd(link)); 55 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 56 goto free_link; 57 58 /* not check contents, but ensure read() ends without error */ 59 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 60 ; 61 CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); 62 63 close(iter_fd); 64 65 free_link: 66 bpf_link__destroy(link); 67 } 68 69 static int read_fd_into_buffer(int fd, char *buf, int size) 70 { 71 int bufleft = size; 72 int len; 73 74 do { 75 len = read(fd, buf, bufleft); 76 if (len > 0) { 77 buf += len; 78 bufleft -= len; 79 } 80 } while (len > 0); 81 82 return len < 0 ? len : size - bufleft; 83 } 84 85 static void test_ipv6_route(void) 86 { 87 struct bpf_iter_ipv6_route *skel; 88 89 skel = bpf_iter_ipv6_route__open_and_load(); 90 if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) 91 return; 92 93 do_dummy_read(skel->progs.dump_ipv6_route); 94 95 bpf_iter_ipv6_route__destroy(skel); 96 } 97 98 static void test_netlink(void) 99 { 100 struct bpf_iter_netlink *skel; 101 102 skel = bpf_iter_netlink__open_and_load(); 103 if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load")) 104 return; 105 106 do_dummy_read(skel->progs.dump_netlink); 107 108 bpf_iter_netlink__destroy(skel); 109 } 110 111 static void test_bpf_map(void) 112 { 113 struct bpf_iter_bpf_map *skel; 114 115 skel = bpf_iter_bpf_map__open_and_load(); 116 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load")) 117 return; 118 119 do_dummy_read(skel->progs.dump_bpf_map); 120 121 bpf_iter_bpf_map__destroy(skel); 122 } 123 124 static void test_task(void) 125 { 126 struct bpf_iter_task *skel; 127 128 skel = bpf_iter_task__open_and_load(); 129 if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) 130 return; 131 132 do_dummy_read(skel->progs.dump_task); 133 134 bpf_iter_task__destroy(skel); 135 } 136 137 static void test_task_sleepable(void) 138 { 139 struct bpf_iter_task *skel; 140 141 skel = bpf_iter_task__open_and_load(); 142 if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) 143 return; 144 145 do_dummy_read(skel->progs.dump_task_sleepable); 146 147 ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0, 148 "num_expected_failure_copy_from_user_task"); 149 ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, 150 "num_success_copy_from_user_task"); 151 152 bpf_iter_task__destroy(skel); 153 } 154 155 static void test_task_stack(void) 156 { 157 struct bpf_iter_task_stack *skel; 158 159 skel = bpf_iter_task_stack__open_and_load(); 160 if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load")) 161 return; 162 163 do_dummy_read(skel->progs.dump_task_stack); 164 do_dummy_read(skel->progs.get_task_user_stacks); 165 166 bpf_iter_task_stack__destroy(skel); 167 } 168 169 static void *do_nothing(void *arg) 170 { 171 pthread_exit(arg); 172 } 173 174 static void test_task_file(void) 175 { 176 struct bpf_iter_task_file *skel; 177 pthread_t thread_id; 178 void *ret; 179 180 skel = bpf_iter_task_file__open_and_load(); 181 if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load")) 182 return; 183 184 skel->bss->tgid = getpid(); 185 186 if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL), 187 "pthread_create")) 188 goto done; 189 190 do_dummy_read(skel->progs.dump_task_file); 191 192 if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, 193 "pthread_join")) 194 goto done; 195 196 ASSERT_EQ(skel->bss->count, 0, "check_count"); 197 198 done: 199 bpf_iter_task_file__destroy(skel); 200 } 201 202 #define TASKBUFSZ 32768 203 204 static char taskbuf[TASKBUFSZ]; 205 206 static int do_btf_read(struct bpf_iter_task_btf *skel) 207 { 208 struct bpf_program *prog = skel->progs.dump_task_struct; 209 struct bpf_iter_task_btf__bss *bss = skel->bss; 210 int iter_fd = -1, err; 211 struct bpf_link *link; 212 char *buf = taskbuf; 213 int ret = 0; 214 215 link = bpf_program__attach_iter(prog, NULL); 216 if (!ASSERT_OK_PTR(link, "attach_iter")) 217 return ret; 218 219 iter_fd = bpf_iter_create(bpf_link__fd(link)); 220 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 221 goto free_link; 222 223 err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); 224 if (bss->skip) { 225 printf("%s:SKIP:no __builtin_btf_type_id\n", __func__); 226 ret = 1; 227 test__skip(); 228 goto free_link; 229 } 230 231 if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) 232 goto free_link; 233 234 ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", 235 "check for btf representation of task_struct in iter data"); 236 free_link: 237 if (iter_fd > 0) 238 close(iter_fd); 239 bpf_link__destroy(link); 240 return ret; 241 } 242 243 static void test_task_btf(void) 244 { 245 struct bpf_iter_task_btf__bss *bss; 246 struct bpf_iter_task_btf *skel; 247 int ret; 248 249 skel = bpf_iter_task_btf__open_and_load(); 250 if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load")) 251 return; 252 253 bss = skel->bss; 254 255 ret = do_btf_read(skel); 256 if (ret) 257 goto cleanup; 258 259 if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?")) 260 goto cleanup; 261 262 ASSERT_EQ(bss->seq_err, 0, "check for unexpected err"); 263 264 cleanup: 265 bpf_iter_task_btf__destroy(skel); 266 } 267 268 static void test_tcp4(void) 269 { 270 struct bpf_iter_tcp4 *skel; 271 272 skel = bpf_iter_tcp4__open_and_load(); 273 if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load")) 274 return; 275 276 do_dummy_read(skel->progs.dump_tcp4); 277 278 bpf_iter_tcp4__destroy(skel); 279 } 280 281 static void test_tcp6(void) 282 { 283 struct bpf_iter_tcp6 *skel; 284 285 skel = bpf_iter_tcp6__open_and_load(); 286 if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load")) 287 return; 288 289 do_dummy_read(skel->progs.dump_tcp6); 290 291 bpf_iter_tcp6__destroy(skel); 292 } 293 294 static void test_udp4(void) 295 { 296 struct bpf_iter_udp4 *skel; 297 298 skel = bpf_iter_udp4__open_and_load(); 299 if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load")) 300 return; 301 302 do_dummy_read(skel->progs.dump_udp4); 303 304 bpf_iter_udp4__destroy(skel); 305 } 306 307 static void test_udp6(void) 308 { 309 struct bpf_iter_udp6 *skel; 310 311 skel = bpf_iter_udp6__open_and_load(); 312 if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load")) 313 return; 314 315 do_dummy_read(skel->progs.dump_udp6); 316 317 bpf_iter_udp6__destroy(skel); 318 } 319 320 static void test_unix(void) 321 { 322 struct bpf_iter_unix *skel; 323 324 skel = bpf_iter_unix__open_and_load(); 325 if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load")) 326 return; 327 328 do_dummy_read(skel->progs.dump_unix); 329 330 bpf_iter_unix__destroy(skel); 331 } 332 333 /* The expected string is less than 16 bytes */ 334 static int do_read_with_fd(int iter_fd, const char *expected, 335 bool read_one_char) 336 { 337 int len, read_buf_len, start; 338 char buf[16] = {}; 339 340 read_buf_len = read_one_char ? 1 : 16; 341 start = 0; 342 while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { 343 start += len; 344 if (CHECK(start >= 16, "read", "read len %d\n", len)) 345 return -1; 346 read_buf_len = read_one_char ? 1 : 16 - start; 347 } 348 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 349 return -1; 350 351 if (!ASSERT_STREQ(buf, expected, "read")) 352 return -1; 353 354 return 0; 355 } 356 357 static void test_anon_iter(bool read_one_char) 358 { 359 struct bpf_iter_test_kern1 *skel; 360 struct bpf_link *link; 361 int iter_fd, err; 362 363 skel = bpf_iter_test_kern1__open_and_load(); 364 if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load")) 365 return; 366 367 err = bpf_iter_test_kern1__attach(skel); 368 if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) { 369 goto out; 370 } 371 372 link = skel->links.dump_task; 373 iter_fd = bpf_iter_create(bpf_link__fd(link)); 374 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 375 goto out; 376 377 do_read_with_fd(iter_fd, "abcd", read_one_char); 378 close(iter_fd); 379 380 out: 381 bpf_iter_test_kern1__destroy(skel); 382 } 383 384 static int do_read(const char *path, const char *expected) 385 { 386 int err, iter_fd; 387 388 iter_fd = open(path, O_RDONLY); 389 if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", 390 path, strerror(errno))) 391 return -1; 392 393 err = do_read_with_fd(iter_fd, expected, false); 394 close(iter_fd); 395 return err; 396 } 397 398 static void test_file_iter(void) 399 { 400 const char *path = "/sys/fs/bpf/bpf_iter_test1"; 401 struct bpf_iter_test_kern1 *skel1; 402 struct bpf_iter_test_kern2 *skel2; 403 struct bpf_link *link; 404 int err; 405 406 skel1 = bpf_iter_test_kern1__open_and_load(); 407 if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load")) 408 return; 409 410 link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); 411 if (!ASSERT_OK_PTR(link, "attach_iter")) 412 goto out; 413 414 /* unlink this path if it exists. */ 415 unlink(path); 416 417 err = bpf_link__pin(link, path); 418 if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) 419 goto free_link; 420 421 err = do_read(path, "abcd"); 422 if (err) 423 goto unlink_path; 424 425 /* file based iterator seems working fine. Let us a link update 426 * of the underlying link and `cat` the iterator again, its content 427 * should change. 428 */ 429 skel2 = bpf_iter_test_kern2__open_and_load(); 430 if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load")) 431 goto unlink_path; 432 433 err = bpf_link__update_program(link, skel2->progs.dump_task); 434 if (!ASSERT_OK(err, "update_prog")) 435 goto destroy_skel2; 436 437 do_read(path, "ABCD"); 438 439 destroy_skel2: 440 bpf_iter_test_kern2__destroy(skel2); 441 unlink_path: 442 unlink(path); 443 free_link: 444 bpf_link__destroy(link); 445 out: 446 bpf_iter_test_kern1__destroy(skel1); 447 } 448 449 static void test_overflow(bool test_e2big_overflow, bool ret1) 450 { 451 __u32 map_info_len, total_read_len, expected_read_len; 452 int err, iter_fd, map1_fd, map2_fd, len; 453 struct bpf_map_info map_info = {}; 454 struct bpf_iter_test_kern4 *skel; 455 struct bpf_link *link; 456 __u32 iter_size; 457 char *buf; 458 459 skel = bpf_iter_test_kern4__open(); 460 if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open")) 461 return; 462 463 /* create two maps: bpf program will only do bpf_seq_write 464 * for these two maps. The goal is one map output almost 465 * fills seq_file buffer and then the other will trigger 466 * overflow and needs restart. 467 */ 468 map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); 469 if (CHECK(map1_fd < 0, "bpf_map_create", 470 "map_creation failed: %s\n", strerror(errno))) 471 goto out; 472 map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL); 473 if (CHECK(map2_fd < 0, "bpf_map_create", 474 "map_creation failed: %s\n", strerror(errno))) 475 goto free_map1; 476 477 /* bpf_seq_printf kernel buffer is 8 pages, so one map 478 * bpf_seq_write will mostly fill it, and the other map 479 * will partially fill and then trigger overflow and need 480 * bpf_seq_read restart. 481 */ 482 iter_size = sysconf(_SC_PAGE_SIZE) << 3; 483 484 if (test_e2big_overflow) { 485 skel->rodata->print_len = (iter_size + 8) / 8; 486 expected_read_len = 2 * (iter_size + 8); 487 } else if (!ret1) { 488 skel->rodata->print_len = (iter_size - 8) / 8; 489 expected_read_len = 2 * (iter_size - 8); 490 } else { 491 skel->rodata->print_len = 1; 492 expected_read_len = 2 * 8; 493 } 494 skel->rodata->ret1 = ret1; 495 496 if (!ASSERT_OK(bpf_iter_test_kern4__load(skel), 497 "bpf_iter_test_kern4__load")) 498 goto free_map2; 499 500 /* setup filtering map_id in bpf program */ 501 map_info_len = sizeof(map_info); 502 err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); 503 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 504 strerror(errno))) 505 goto free_map2; 506 skel->bss->map1_id = map_info.id; 507 508 err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); 509 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 510 strerror(errno))) 511 goto free_map2; 512 skel->bss->map2_id = map_info.id; 513 514 link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); 515 if (!ASSERT_OK_PTR(link, "attach_iter")) 516 goto free_map2; 517 518 iter_fd = bpf_iter_create(bpf_link__fd(link)); 519 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 520 goto free_link; 521 522 buf = malloc(expected_read_len); 523 if (!buf) 524 goto close_iter; 525 526 /* do read */ 527 total_read_len = 0; 528 if (test_e2big_overflow) { 529 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 530 total_read_len += len; 531 532 CHECK(len != -1 || errno != E2BIG, "read", 533 "expected ret -1, errno E2BIG, but get ret %d, error %s\n", 534 len, strerror(errno)); 535 goto free_buf; 536 } else if (!ret1) { 537 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 538 total_read_len += len; 539 540 if (CHECK(len < 0, "read", "read failed: %s\n", 541 strerror(errno))) 542 goto free_buf; 543 } else { 544 do { 545 len = read(iter_fd, buf, expected_read_len); 546 if (len > 0) 547 total_read_len += len; 548 } while (len > 0 || len == -EAGAIN); 549 550 if (CHECK(len < 0, "read", "read failed: %s\n", 551 strerror(errno))) 552 goto free_buf; 553 } 554 555 if (!ASSERT_EQ(total_read_len, expected_read_len, "read")) 556 goto free_buf; 557 558 if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed")) 559 goto free_buf; 560 561 if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed")) 562 goto free_buf; 563 564 ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum"); 565 566 free_buf: 567 free(buf); 568 close_iter: 569 close(iter_fd); 570 free_link: 571 bpf_link__destroy(link); 572 free_map2: 573 close(map2_fd); 574 free_map1: 575 close(map1_fd); 576 out: 577 bpf_iter_test_kern4__destroy(skel); 578 } 579 580 static void test_bpf_hash_map(void) 581 { 582 __u32 expected_key_a = 0, expected_key_b = 0; 583 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 584 struct bpf_iter_bpf_hash_map *skel; 585 int err, i, len, map_fd, iter_fd; 586 union bpf_iter_link_info linfo; 587 __u64 val, expected_val = 0; 588 struct bpf_link *link; 589 struct key_t { 590 int a; 591 int b; 592 int c; 593 } key; 594 char buf[64]; 595 596 skel = bpf_iter_bpf_hash_map__open(); 597 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open")) 598 return; 599 600 skel->bss->in_test_mode = true; 601 602 err = bpf_iter_bpf_hash_map__load(skel); 603 if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load")) 604 goto out; 605 606 /* iterator with hashmap2 and hashmap3 should fail */ 607 memset(&linfo, 0, sizeof(linfo)); 608 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2); 609 opts.link_info = &linfo; 610 opts.link_info_len = sizeof(linfo); 611 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 612 if (!ASSERT_ERR_PTR(link, "attach_iter")) 613 goto out; 614 615 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3); 616 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 617 if (!ASSERT_ERR_PTR(link, "attach_iter")) 618 goto out; 619 620 /* hashmap1 should be good, update map values here */ 621 map_fd = bpf_map__fd(skel->maps.hashmap1); 622 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 623 key.a = i + 1; 624 key.b = i + 2; 625 key.c = i + 3; 626 val = i + 4; 627 expected_key_a += key.a; 628 expected_key_b += key.b; 629 expected_val += val; 630 631 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); 632 if (!ASSERT_OK(err, "map_update")) 633 goto out; 634 } 635 636 linfo.map.map_fd = map_fd; 637 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 638 if (!ASSERT_OK_PTR(link, "attach_iter")) 639 goto out; 640 641 iter_fd = bpf_iter_create(bpf_link__fd(link)); 642 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 643 goto free_link; 644 645 /* do some tests */ 646 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 647 ; 648 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 649 goto close_iter; 650 651 /* test results */ 652 if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) 653 goto close_iter; 654 if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) 655 goto close_iter; 656 if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) 657 goto close_iter; 658 659 close_iter: 660 close(iter_fd); 661 free_link: 662 bpf_link__destroy(link); 663 out: 664 bpf_iter_bpf_hash_map__destroy(skel); 665 } 666 667 static void test_bpf_percpu_hash_map(void) 668 { 669 __u32 expected_key_a = 0, expected_key_b = 0; 670 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 671 struct bpf_iter_bpf_percpu_hash_map *skel; 672 int err, i, j, len, map_fd, iter_fd; 673 union bpf_iter_link_info linfo; 674 __u32 expected_val = 0; 675 struct bpf_link *link; 676 struct key_t { 677 int a; 678 int b; 679 int c; 680 } key; 681 char buf[64]; 682 void *val; 683 684 skel = bpf_iter_bpf_percpu_hash_map__open(); 685 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open")) 686 return; 687 688 skel->rodata->num_cpus = bpf_num_possible_cpus(); 689 val = malloc(8 * bpf_num_possible_cpus()); 690 691 err = bpf_iter_bpf_percpu_hash_map__load(skel); 692 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load")) 693 goto out; 694 695 /* update map values here */ 696 map_fd = bpf_map__fd(skel->maps.hashmap1); 697 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 698 key.a = i + 1; 699 key.b = i + 2; 700 key.c = i + 3; 701 expected_key_a += key.a; 702 expected_key_b += key.b; 703 704 for (j = 0; j < bpf_num_possible_cpus(); j++) { 705 *(__u32 *)(val + j * 8) = i + j; 706 expected_val += i + j; 707 } 708 709 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); 710 if (!ASSERT_OK(err, "map_update")) 711 goto out; 712 } 713 714 memset(&linfo, 0, sizeof(linfo)); 715 linfo.map.map_fd = map_fd; 716 opts.link_info = &linfo; 717 opts.link_info_len = sizeof(linfo); 718 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); 719 if (!ASSERT_OK_PTR(link, "attach_iter")) 720 goto out; 721 722 iter_fd = bpf_iter_create(bpf_link__fd(link)); 723 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 724 goto free_link; 725 726 /* do some tests */ 727 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 728 ; 729 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 730 goto close_iter; 731 732 /* test results */ 733 if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) 734 goto close_iter; 735 if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) 736 goto close_iter; 737 if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) 738 goto close_iter; 739 740 close_iter: 741 close(iter_fd); 742 free_link: 743 bpf_link__destroy(link); 744 out: 745 bpf_iter_bpf_percpu_hash_map__destroy(skel); 746 free(val); 747 } 748 749 static void test_bpf_array_map(void) 750 { 751 __u64 val, expected_val = 0, res_first_val, first_val = 0; 752 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 753 __u32 expected_key = 0, res_first_key; 754 struct bpf_iter_bpf_array_map *skel; 755 union bpf_iter_link_info linfo; 756 int err, i, map_fd, iter_fd; 757 struct bpf_link *link; 758 char buf[64] = {}; 759 int len, start; 760 761 skel = bpf_iter_bpf_array_map__open_and_load(); 762 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) 763 return; 764 765 map_fd = bpf_map__fd(skel->maps.arraymap1); 766 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 767 val = i + 4; 768 expected_key += i; 769 expected_val += val; 770 771 if (i == 0) 772 first_val = val; 773 774 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); 775 if (!ASSERT_OK(err, "map_update")) 776 goto out; 777 } 778 779 memset(&linfo, 0, sizeof(linfo)); 780 linfo.map.map_fd = map_fd; 781 opts.link_info = &linfo; 782 opts.link_info_len = sizeof(linfo); 783 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); 784 if (!ASSERT_OK_PTR(link, "attach_iter")) 785 goto out; 786 787 iter_fd = bpf_iter_create(bpf_link__fd(link)); 788 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 789 goto free_link; 790 791 /* do some tests */ 792 start = 0; 793 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) 794 start += len; 795 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 796 goto close_iter; 797 798 /* test results */ 799 res_first_key = *(__u32 *)buf; 800 res_first_val = *(__u64 *)(buf + sizeof(__u32)); 801 if (CHECK(res_first_key != 0 || res_first_val != first_val, 802 "bpf_seq_write", 803 "seq_write failure: first key %u vs expected 0, " 804 " first value %llu vs expected %llu\n", 805 res_first_key, res_first_val, first_val)) 806 goto close_iter; 807 808 if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) 809 goto close_iter; 810 if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) 811 goto close_iter; 812 813 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 814 err = bpf_map_lookup_elem(map_fd, &i, &val); 815 if (!ASSERT_OK(err, "map_lookup")) 816 goto out; 817 if (!ASSERT_EQ(i, val, "invalid_val")) 818 goto out; 819 } 820 821 close_iter: 822 close(iter_fd); 823 free_link: 824 bpf_link__destroy(link); 825 out: 826 bpf_iter_bpf_array_map__destroy(skel); 827 } 828 829 static void test_bpf_percpu_array_map(void) 830 { 831 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 832 struct bpf_iter_bpf_percpu_array_map *skel; 833 __u32 expected_key = 0, expected_val = 0; 834 union bpf_iter_link_info linfo; 835 int err, i, j, map_fd, iter_fd; 836 struct bpf_link *link; 837 char buf[64]; 838 void *val; 839 int len; 840 841 skel = bpf_iter_bpf_percpu_array_map__open(); 842 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open")) 843 return; 844 845 skel->rodata->num_cpus = bpf_num_possible_cpus(); 846 val = malloc(8 * bpf_num_possible_cpus()); 847 848 err = bpf_iter_bpf_percpu_array_map__load(skel); 849 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load")) 850 goto out; 851 852 /* update map values here */ 853 map_fd = bpf_map__fd(skel->maps.arraymap1); 854 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 855 expected_key += i; 856 857 for (j = 0; j < bpf_num_possible_cpus(); j++) { 858 *(__u32 *)(val + j * 8) = i + j; 859 expected_val += i + j; 860 } 861 862 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); 863 if (!ASSERT_OK(err, "map_update")) 864 goto out; 865 } 866 867 memset(&linfo, 0, sizeof(linfo)); 868 linfo.map.map_fd = map_fd; 869 opts.link_info = &linfo; 870 opts.link_info_len = sizeof(linfo); 871 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); 872 if (!ASSERT_OK_PTR(link, "attach_iter")) 873 goto out; 874 875 iter_fd = bpf_iter_create(bpf_link__fd(link)); 876 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 877 goto free_link; 878 879 /* do some tests */ 880 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 881 ; 882 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 883 goto close_iter; 884 885 /* test results */ 886 if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) 887 goto close_iter; 888 if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) 889 goto close_iter; 890 891 close_iter: 892 close(iter_fd); 893 free_link: 894 bpf_link__destroy(link); 895 out: 896 bpf_iter_bpf_percpu_array_map__destroy(skel); 897 free(val); 898 } 899 900 /* An iterator program deletes all local storage in a map. */ 901 static void test_bpf_sk_storage_delete(void) 902 { 903 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 904 struct bpf_iter_bpf_sk_storage_helpers *skel; 905 union bpf_iter_link_info linfo; 906 int err, len, map_fd, iter_fd; 907 struct bpf_link *link; 908 int sock_fd = -1; 909 __u32 val = 42; 910 char buf[64]; 911 912 skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); 913 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) 914 return; 915 916 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 917 918 sock_fd = socket(AF_INET6, SOCK_STREAM, 0); 919 if (!ASSERT_GE(sock_fd, 0, "socket")) 920 goto out; 921 err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); 922 if (!ASSERT_OK(err, "map_update")) 923 goto out; 924 925 memset(&linfo, 0, sizeof(linfo)); 926 linfo.map.map_fd = map_fd; 927 opts.link_info = &linfo; 928 opts.link_info_len = sizeof(linfo); 929 link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map, 930 &opts); 931 if (!ASSERT_OK_PTR(link, "attach_iter")) 932 goto out; 933 934 iter_fd = bpf_iter_create(bpf_link__fd(link)); 935 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 936 goto free_link; 937 938 /* do some tests */ 939 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 940 ; 941 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 942 goto close_iter; 943 944 /* test results */ 945 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 946 if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", 947 "map value wasn't deleted (err=%d, errno=%d)\n", err, errno)) 948 goto close_iter; 949 950 close_iter: 951 close(iter_fd); 952 free_link: 953 bpf_link__destroy(link); 954 out: 955 if (sock_fd >= 0) 956 close(sock_fd); 957 bpf_iter_bpf_sk_storage_helpers__destroy(skel); 958 } 959 960 /* This creates a socket and its local storage. It then runs a task_iter BPF 961 * program that replaces the existing socket local storage with the tgid of the 962 * only task owning a file descriptor to this socket, this process, prog_tests. 963 * It then runs a tcp socket iterator that negates the value in the existing 964 * socket local storage, the test verifies that the resulting value is -pid. 965 */ 966 static void test_bpf_sk_storage_get(void) 967 { 968 struct bpf_iter_bpf_sk_storage_helpers *skel; 969 int err, map_fd, val = -1; 970 int sock_fd = -1; 971 972 skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); 973 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) 974 return; 975 976 sock_fd = socket(AF_INET6, SOCK_STREAM, 0); 977 if (!ASSERT_GE(sock_fd, 0, "socket")) 978 goto out; 979 980 err = listen(sock_fd, 1); 981 if (!ASSERT_OK(err, "listen")) 982 goto close_socket; 983 984 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 985 986 err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); 987 if (!ASSERT_OK(err, "bpf_map_update_elem")) 988 goto close_socket; 989 990 do_dummy_read(skel->progs.fill_socket_owner); 991 992 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 993 if (CHECK(err || val != getpid(), "bpf_map_lookup_elem", 994 "map value wasn't set correctly (expected %d, got %d, err=%d)\n", 995 getpid(), val, err)) 996 goto close_socket; 997 998 do_dummy_read(skel->progs.negate_socket_local_storage); 999 1000 err = bpf_map_lookup_elem(map_fd, &sock_fd, &val); 1001 CHECK(err || val != -getpid(), "bpf_map_lookup_elem", 1002 "map value wasn't set correctly (expected %d, got %d, err=%d)\n", 1003 -getpid(), val, err); 1004 1005 close_socket: 1006 close(sock_fd); 1007 out: 1008 bpf_iter_bpf_sk_storage_helpers__destroy(skel); 1009 } 1010 1011 static void test_bpf_sk_storage_map(void) 1012 { 1013 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 1014 int err, i, len, map_fd, iter_fd, num_sockets; 1015 struct bpf_iter_bpf_sk_storage_map *skel; 1016 union bpf_iter_link_info linfo; 1017 int sock_fd[3] = {-1, -1, -1}; 1018 __u32 val, expected_val = 0; 1019 struct bpf_link *link; 1020 char buf[64]; 1021 1022 skel = bpf_iter_bpf_sk_storage_map__open_and_load(); 1023 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) 1024 return; 1025 1026 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 1027 num_sockets = ARRAY_SIZE(sock_fd); 1028 for (i = 0; i < num_sockets; i++) { 1029 sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); 1030 if (!ASSERT_GE(sock_fd[i], 0, "socket")) 1031 goto out; 1032 1033 val = i + 1; 1034 expected_val += val; 1035 1036 err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, 1037 BPF_NOEXIST); 1038 if (!ASSERT_OK(err, "map_update")) 1039 goto out; 1040 } 1041 1042 memset(&linfo, 0, sizeof(linfo)); 1043 linfo.map.map_fd = map_fd; 1044 opts.link_info = &linfo; 1045 opts.link_info_len = sizeof(linfo); 1046 link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts); 1047 if (!ASSERT_OK_PTR(link, "attach_iter")) 1048 goto out; 1049 1050 iter_fd = bpf_iter_create(bpf_link__fd(link)); 1051 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 1052 goto free_link; 1053 1054 /* do some tests */ 1055 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 1056 ; 1057 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 1058 goto close_iter; 1059 1060 /* test results */ 1061 if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count")) 1062 goto close_iter; 1063 1064 if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) 1065 goto close_iter; 1066 1067 close_iter: 1068 close(iter_fd); 1069 free_link: 1070 bpf_link__destroy(link); 1071 out: 1072 for (i = 0; i < num_sockets; i++) { 1073 if (sock_fd[i] >= 0) 1074 close(sock_fd[i]); 1075 } 1076 bpf_iter_bpf_sk_storage_map__destroy(skel); 1077 } 1078 1079 static void test_rdonly_buf_out_of_bound(void) 1080 { 1081 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 1082 struct bpf_iter_test_kern5 *skel; 1083 union bpf_iter_link_info linfo; 1084 struct bpf_link *link; 1085 1086 skel = bpf_iter_test_kern5__open_and_load(); 1087 if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load")) 1088 return; 1089 1090 memset(&linfo, 0, sizeof(linfo)); 1091 linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1); 1092 opts.link_info = &linfo; 1093 opts.link_info_len = sizeof(linfo); 1094 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 1095 if (!ASSERT_ERR_PTR(link, "attach_iter")) 1096 bpf_link__destroy(link); 1097 1098 bpf_iter_test_kern5__destroy(skel); 1099 } 1100 1101 static void test_buf_neg_offset(void) 1102 { 1103 struct bpf_iter_test_kern6 *skel; 1104 1105 skel = bpf_iter_test_kern6__open_and_load(); 1106 if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load")) 1107 bpf_iter_test_kern6__destroy(skel); 1108 } 1109 1110 static void test_link_iter(void) 1111 { 1112 struct bpf_iter_bpf_link *skel; 1113 1114 skel = bpf_iter_bpf_link__open_and_load(); 1115 if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load")) 1116 return; 1117 1118 do_dummy_read(skel->progs.dump_bpf_link); 1119 1120 bpf_iter_bpf_link__destroy(skel); 1121 } 1122 1123 #define CMP_BUFFER_SIZE 1024 1124 static char task_vma_output[CMP_BUFFER_SIZE]; 1125 static char proc_maps_output[CMP_BUFFER_SIZE]; 1126 1127 /* remove \0 and \t from str, and only keep the first line */ 1128 static void str_strip_first_line(char *str) 1129 { 1130 char *dst = str, *src = str; 1131 1132 do { 1133 if (*src == ' ' || *src == '\t') 1134 src++; 1135 else 1136 *(dst++) = *(src++); 1137 1138 } while (*src != '\0' && *src != '\n'); 1139 1140 *dst = '\0'; 1141 } 1142 1143 static void test_task_vma(void) 1144 { 1145 int err, iter_fd = -1, proc_maps_fd = -1; 1146 struct bpf_iter_task_vma *skel; 1147 int len, read_size = 4; 1148 char maps_path[64]; 1149 1150 skel = bpf_iter_task_vma__open(); 1151 if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open")) 1152 return; 1153 1154 skel->bss->pid = getpid(); 1155 1156 err = bpf_iter_task_vma__load(skel); 1157 if (!ASSERT_OK(err, "bpf_iter_task_vma__load")) 1158 goto out; 1159 1160 skel->links.proc_maps = bpf_program__attach_iter( 1161 skel->progs.proc_maps, NULL); 1162 1163 if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) { 1164 skel->links.proc_maps = NULL; 1165 goto out; 1166 } 1167 1168 iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); 1169 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 1170 goto out; 1171 1172 /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks 1173 * to trigger seq_file corner cases. 1174 */ 1175 len = 0; 1176 while (len < CMP_BUFFER_SIZE) { 1177 err = read_fd_into_buffer(iter_fd, task_vma_output + len, 1178 MIN(read_size, CMP_BUFFER_SIZE - len)); 1179 if (!err) 1180 break; 1181 if (!ASSERT_GE(err, 0, "read_iter_fd")) 1182 goto out; 1183 len += err; 1184 } 1185 1186 /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */ 1187 snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid); 1188 proc_maps_fd = open(maps_path, O_RDONLY); 1189 if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps")) 1190 goto out; 1191 err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE); 1192 if (!ASSERT_GE(err, 0, "read_prog_maps_fd")) 1193 goto out; 1194 1195 /* strip and compare the first line of the two files */ 1196 str_strip_first_line(task_vma_output); 1197 str_strip_first_line(proc_maps_output); 1198 1199 ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output"); 1200 out: 1201 close(proc_maps_fd); 1202 close(iter_fd); 1203 bpf_iter_task_vma__destroy(skel); 1204 } 1205 1206 void test_bpf_iter(void) 1207 { 1208 if (test__start_subtest("btf_id_or_null")) 1209 test_btf_id_or_null(); 1210 if (test__start_subtest("ipv6_route")) 1211 test_ipv6_route(); 1212 if (test__start_subtest("netlink")) 1213 test_netlink(); 1214 if (test__start_subtest("bpf_map")) 1215 test_bpf_map(); 1216 if (test__start_subtest("task")) 1217 test_task(); 1218 if (test__start_subtest("task_sleepable")) 1219 test_task_sleepable(); 1220 if (test__start_subtest("task_stack")) 1221 test_task_stack(); 1222 if (test__start_subtest("task_file")) 1223 test_task_file(); 1224 if (test__start_subtest("task_vma")) 1225 test_task_vma(); 1226 if (test__start_subtest("task_btf")) 1227 test_task_btf(); 1228 if (test__start_subtest("tcp4")) 1229 test_tcp4(); 1230 if (test__start_subtest("tcp6")) 1231 test_tcp6(); 1232 if (test__start_subtest("udp4")) 1233 test_udp4(); 1234 if (test__start_subtest("udp6")) 1235 test_udp6(); 1236 if (test__start_subtest("unix")) 1237 test_unix(); 1238 if (test__start_subtest("anon")) 1239 test_anon_iter(false); 1240 if (test__start_subtest("anon-read-one-char")) 1241 test_anon_iter(true); 1242 if (test__start_subtest("file")) 1243 test_file_iter(); 1244 if (test__start_subtest("overflow")) 1245 test_overflow(false, false); 1246 if (test__start_subtest("overflow-e2big")) 1247 test_overflow(true, false); 1248 if (test__start_subtest("prog-ret-1")) 1249 test_overflow(false, true); 1250 if (test__start_subtest("bpf_hash_map")) 1251 test_bpf_hash_map(); 1252 if (test__start_subtest("bpf_percpu_hash_map")) 1253 test_bpf_percpu_hash_map(); 1254 if (test__start_subtest("bpf_array_map")) 1255 test_bpf_array_map(); 1256 if (test__start_subtest("bpf_percpu_array_map")) 1257 test_bpf_percpu_array_map(); 1258 if (test__start_subtest("bpf_sk_storage_map")) 1259 test_bpf_sk_storage_map(); 1260 if (test__start_subtest("bpf_sk_storage_delete")) 1261 test_bpf_sk_storage_delete(); 1262 if (test__start_subtest("bpf_sk_storage_get")) 1263 test_bpf_sk_storage_get(); 1264 if (test__start_subtest("rdonly-buf-out-of-bound")) 1265 test_rdonly_buf_out_of_bound(); 1266 if (test__start_subtest("buf-neg-offset")) 1267 test_buf_neg_offset(); 1268 if (test__start_subtest("link-iter")) 1269 test_link_iter(); 1270 } 1271