// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"

static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

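/*
 * Attach @prog as an iterator, create an anonymous iterator fd from
 * the resulting link, and drain it with read(). The output itself is
 * not validated; the test only ensures the kernel can walk the whole
 * object set without a read error.
 */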
static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void test_task(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (CHECK(!skel, "bpf_iter_task__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task);

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task_file);

	bpf_iter_task_file__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

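/*
 * Drain @iter_fd into a local buffer and compare the result against
 * @expected. With @read_one_char set, the fd is read one byte at a
 * time to also exercise short reads against the seq_file buffer.
 */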
/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int err = -1, len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	err = strcmp(buf, expected);
	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (CHECK(err, "bpf_iter_test_kern1__attach",
		  "skeleton attach failed\n"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

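/*
 * Drive the seq_file overflow handling. The iterator program does a
 * bpf_seq_write() of print_len * 8 bytes for each of two array maps;
 * depending on the arguments the output either fits, exceeds the
 * one-page kernel buffer (read() must then fail with E2BIG), or the
 * program returns 1 (the "prog-ret-1" subtest).
 */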
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 page_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (CHECK(!skel, "bpf_iter_test_kern4__open",
		  "skeleton open failed\n"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that the output of one map
	 * almost fills the seq_file buffer and the other then triggers
	 * an overflow and needs a restart.
	 */
	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map1_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map2_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* bpf_seq_printf kernel buffer is one page, so one map
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill and then trigger overflow and need
	 * bpf_seq_read restart.
	 */
	page_size = sysconf(_SC_PAGE_SIZE);

	if (test_e2big_overflow) {
		skel->rodata->print_len = (page_size + 8) / 8;
		expected_read_len = 2 * (page_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (page_size - 8) / 8;
		expected_read_len = 2 * (page_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (CHECK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (CHECK(!buf, "malloc", "malloc failed\n"))
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		/* read() signals EAGAIN via errno with a -1 return, not
		 * via a -EAGAIN return value, so retry based on errno.
		 */
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (CHECK(total_read_len != expected_read_len, "read",
		  "total len %u, expected len %u\n", total_read_len,
		  expected_read_len))
		goto free_buf;

	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
		  "expected 1 actual %d\n", skel->bss->map1_accessed))
		goto free_buf;

	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
		  "expected 2 actual %d\n", skel->bss->map2_accessed))
		goto free_buf;

	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
	      "map2_seqnum", "two different seqnum %lld %lld\n",
	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

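/*
 * Iterate a specific hash map: bind the iterator to the map fd via
 * bpf_iter_link_info and verify that the bpf program saw exactly the
 * key/value sums seeded from user space. Attaching against hashmap2
 * and hashmap3 must be rejected (see the checks below).
 */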
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
		  "skeleton open failed\n"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter",
		  "attach_iter for hashmap2 unexpectedly succeeded\n"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter",
		  "attach_iter for hashmap3 unexpectedly succeeded\n"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

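/*
 * Same pattern as above, but for a per-CPU hash map: the value
 * written for each key is an array with one 8-byte slot per possible
 * CPU, and the bpf program sums the values across all slots.
 */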
static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed\n"))
		return;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
		  "skeleton open failed\n"))
		goto free_val;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
free_val:
	free(val);
}

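/*
 * Array map iteration: besides summing keys and values on the bpf
 * side, the program emits the first key/value pair verbatim via
 * bpf_seq_write(), which is verified at the start of the read buffer,
 * and writes each element's index back as its value, which the
 * map_lookup loop at the end checks.
 */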
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
			goto close_iter;
		if (CHECK(i != val, "invalid_val",
			  "got value %llu expected %u\n", val, i))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed\n"))
		return;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
		  "skeleton open failed\n"))
		goto free_val;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_array_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
free_val:
	free(val);
}

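/*
 * Socket-storage iteration: create a few IPv6 sockets, seed the
 * sk_storage map keyed by socket fd, and verify that the iterator
 * visited every socket and accumulated the expected value sum.
 */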
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

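/*
 * bpf_iter_test_kern6 accesses the iterator output buffer at a
 * negative offset; the verifier is expected to reject it, so a
 * successful open_and_load here is a test failure.
 */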
"bpf_iter_test_kern6__open_and_load", 919 "skeleton open_and_load unexpected success\n")) 920 bpf_iter_test_kern6__destroy(skel); 921 } 922 923 void test_bpf_iter(void) 924 { 925 if (test__start_subtest("btf_id_or_null")) 926 test_btf_id_or_null(); 927 if (test__start_subtest("ipv6_route")) 928 test_ipv6_route(); 929 if (test__start_subtest("netlink")) 930 test_netlink(); 931 if (test__start_subtest("bpf_map")) 932 test_bpf_map(); 933 if (test__start_subtest("task")) 934 test_task(); 935 if (test__start_subtest("task_stack")) 936 test_task_stack(); 937 if (test__start_subtest("task_file")) 938 test_task_file(); 939 if (test__start_subtest("tcp4")) 940 test_tcp4(); 941 if (test__start_subtest("tcp6")) 942 test_tcp6(); 943 if (test__start_subtest("udp4")) 944 test_udp4(); 945 if (test__start_subtest("udp6")) 946 test_udp6(); 947 if (test__start_subtest("anon")) 948 test_anon_iter(false); 949 if (test__start_subtest("anon-read-one-char")) 950 test_anon_iter(true); 951 if (test__start_subtest("file")) 952 test_file_iter(); 953 if (test__start_subtest("overflow")) 954 test_overflow(false, false); 955 if (test__start_subtest("overflow-e2big")) 956 test_overflow(true, false); 957 if (test__start_subtest("prog-ret-1")) 958 test_overflow(false, true); 959 if (test__start_subtest("bpf_hash_map")) 960 test_bpf_hash_map(); 961 if (test__start_subtest("bpf_percpu_hash_map")) 962 test_bpf_percpu_hash_map(); 963 if (test__start_subtest("bpf_array_map")) 964 test_bpf_array_map(); 965 if (test__start_subtest("bpf_percpu_array_map")) 966 test_bpf_percpu_array_map(); 967 if (test__start_subtest("bpf_sk_storage_map")) 968 test_bpf_sk_storage_map(); 969 if (test__start_subtest("rdonly-buf-out-of-bound")) 970 test_rdonly_buf_out_of_bound(); 971 if (test__start_subtest("buf-neg-offset")) 972 test_buf_neg_offset(); 973 } 974