1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2020 Facebook */ 3 #include <test_progs.h> 4 #include "bpf_iter_ipv6_route.skel.h" 5 #include "bpf_iter_netlink.skel.h" 6 #include "bpf_iter_bpf_map.skel.h" 7 #include "bpf_iter_task.skel.h" 8 #include "bpf_iter_task_stack.skel.h" 9 #include "bpf_iter_task_file.skel.h" 10 #include "bpf_iter_tcp4.skel.h" 11 #include "bpf_iter_tcp6.skel.h" 12 #include "bpf_iter_udp4.skel.h" 13 #include "bpf_iter_udp6.skel.h" 14 #include "bpf_iter_test_kern1.skel.h" 15 #include "bpf_iter_test_kern2.skel.h" 16 #include "bpf_iter_test_kern3.skel.h" 17 #include "bpf_iter_test_kern4.skel.h" 18 #include "bpf_iter_bpf_hash_map.skel.h" 19 #include "bpf_iter_bpf_percpu_hash_map.skel.h" 20 #include "bpf_iter_bpf_array_map.skel.h" 21 #include "bpf_iter_bpf_percpu_array_map.skel.h" 22 #include "bpf_iter_bpf_sk_storage_map.skel.h" 23 #include "bpf_iter_test_kern5.skel.h" 24 #include "bpf_iter_test_kern6.skel.h" 25 26 static int duration; 27 28 static void test_btf_id_or_null(void) 29 { 30 struct bpf_iter_test_kern3 *skel; 31 32 skel = bpf_iter_test_kern3__open_and_load(); 33 if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", 34 "skeleton open_and_load unexpectedly succeeded\n")) { 35 bpf_iter_test_kern3__destroy(skel); 36 return; 37 } 38 } 39 40 static void do_dummy_read(struct bpf_program *prog) 41 { 42 struct bpf_link *link; 43 char buf[16] = {}; 44 int iter_fd, len; 45 46 link = bpf_program__attach_iter(prog, NULL); 47 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 48 return; 49 50 iter_fd = bpf_iter_create(bpf_link__fd(link)); 51 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 52 goto free_link; 53 54 /* not check contents, but ensure read() ends without error */ 55 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 56 ; 57 CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)); 58 59 close(iter_fd); 60 61 free_link: 62 bpf_link__destroy(link); 63 } 64 65 static void test_ipv6_route(void) 66 { 67 struct bpf_iter_ipv6_route *skel; 68 69 skel = bpf_iter_ipv6_route__open_and_load(); 70 if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", 71 "skeleton open_and_load failed\n")) 72 return; 73 74 do_dummy_read(skel->progs.dump_ipv6_route); 75 76 bpf_iter_ipv6_route__destroy(skel); 77 } 78 79 static void test_netlink(void) 80 { 81 struct bpf_iter_netlink *skel; 82 83 skel = bpf_iter_netlink__open_and_load(); 84 if (CHECK(!skel, "bpf_iter_netlink__open_and_load", 85 "skeleton open_and_load failed\n")) 86 return; 87 88 do_dummy_read(skel->progs.dump_netlink); 89 90 bpf_iter_netlink__destroy(skel); 91 } 92 93 static void test_bpf_map(void) 94 { 95 struct bpf_iter_bpf_map *skel; 96 97 skel = bpf_iter_bpf_map__open_and_load(); 98 if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", 99 "skeleton open_and_load failed\n")) 100 return; 101 102 do_dummy_read(skel->progs.dump_bpf_map); 103 104 bpf_iter_bpf_map__destroy(skel); 105 } 106 107 static void test_task(void) 108 { 109 struct bpf_iter_task *skel; 110 111 skel = bpf_iter_task__open_and_load(); 112 if (CHECK(!skel, "bpf_iter_task__open_and_load", 113 "skeleton open_and_load failed\n")) 114 return; 115 116 do_dummy_read(skel->progs.dump_task); 117 118 bpf_iter_task__destroy(skel); 119 } 120 121 static void test_task_stack(void) 122 { 123 struct bpf_iter_task_stack *skel; 124 125 skel = bpf_iter_task_stack__open_and_load(); 126 if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", 127 "skeleton open_and_load failed\n")) 128 return; 129 130 do_dummy_read(skel->progs.dump_task_stack); 131 132 bpf_iter_task_stack__destroy(skel); 133 } 134 135 static void test_task_file(void) 136 { 137 struct bpf_iter_task_file *skel; 138 139 skel = bpf_iter_task_file__open_and_load(); 140 if (CHECK(!skel, "bpf_iter_task_file__open_and_load", 141 "skeleton open_and_load failed\n")) 142 return; 143 144 do_dummy_read(skel->progs.dump_task_file); 145 146 bpf_iter_task_file__destroy(skel); 147 } 148 149 static void test_tcp4(void) 150 { 151 struct bpf_iter_tcp4 *skel; 152 153 skel = bpf_iter_tcp4__open_and_load(); 154 if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", 155 "skeleton open_and_load failed\n")) 156 return; 157 158 do_dummy_read(skel->progs.dump_tcp4); 159 160 bpf_iter_tcp4__destroy(skel); 161 } 162 163 static void test_tcp6(void) 164 { 165 struct bpf_iter_tcp6 *skel; 166 167 skel = bpf_iter_tcp6__open_and_load(); 168 if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", 169 "skeleton open_and_load failed\n")) 170 return; 171 172 do_dummy_read(skel->progs.dump_tcp6); 173 174 bpf_iter_tcp6__destroy(skel); 175 } 176 177 static void test_udp4(void) 178 { 179 struct bpf_iter_udp4 *skel; 180 181 skel = bpf_iter_udp4__open_and_load(); 182 if (CHECK(!skel, "bpf_iter_udp4__open_and_load", 183 "skeleton open_and_load failed\n")) 184 return; 185 186 do_dummy_read(skel->progs.dump_udp4); 187 188 bpf_iter_udp4__destroy(skel); 189 } 190 191 static void test_udp6(void) 192 { 193 struct bpf_iter_udp6 *skel; 194 195 skel = bpf_iter_udp6__open_and_load(); 196 if (CHECK(!skel, "bpf_iter_udp6__open_and_load", 197 "skeleton open_and_load failed\n")) 198 return; 199 200 do_dummy_read(skel->progs.dump_udp6); 201 202 bpf_iter_udp6__destroy(skel); 203 } 204 205 /* The expected string is less than 16 bytes */ 206 static int do_read_with_fd(int iter_fd, const char *expected, 207 bool read_one_char) 208 { 209 int err = -1, len, read_buf_len, start; 210 char buf[16] = {}; 211 212 read_buf_len = read_one_char ? 1 : 16; 213 start = 0; 214 while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) { 215 start += len; 216 if (CHECK(start >= 16, "read", "read len %d\n", len)) 217 return -1; 218 read_buf_len = read_one_char ? 1 : 16 - start; 219 } 220 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 221 return -1; 222 223 err = strcmp(buf, expected); 224 if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", 225 buf, expected)) 226 return -1; 227 228 return 0; 229 } 230 231 static void test_anon_iter(bool read_one_char) 232 { 233 struct bpf_iter_test_kern1 *skel; 234 struct bpf_link *link; 235 int iter_fd, err; 236 237 skel = bpf_iter_test_kern1__open_and_load(); 238 if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", 239 "skeleton open_and_load failed\n")) 240 return; 241 242 err = bpf_iter_test_kern1__attach(skel); 243 if (CHECK(err, "bpf_iter_test_kern1__attach", 244 "skeleton attach failed\n")) { 245 goto out; 246 } 247 248 link = skel->links.dump_task; 249 iter_fd = bpf_iter_create(bpf_link__fd(link)); 250 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 251 goto out; 252 253 do_read_with_fd(iter_fd, "abcd", read_one_char); 254 close(iter_fd); 255 256 out: 257 bpf_iter_test_kern1__destroy(skel); 258 } 259 260 static int do_read(const char *path, const char *expected) 261 { 262 int err, iter_fd; 263 264 iter_fd = open(path, O_RDONLY); 265 if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n", 266 path, strerror(errno))) 267 return -1; 268 269 err = do_read_with_fd(iter_fd, expected, false); 270 close(iter_fd); 271 return err; 272 } 273 274 static void test_file_iter(void) 275 { 276 const char *path = "/sys/fs/bpf/bpf_iter_test1"; 277 struct bpf_iter_test_kern1 *skel1; 278 struct bpf_iter_test_kern2 *skel2; 279 struct bpf_link *link; 280 int err; 281 282 skel1 = bpf_iter_test_kern1__open_and_load(); 283 if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", 284 "skeleton open_and_load failed\n")) 285 return; 286 287 link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); 288 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 289 goto out; 290 291 /* unlink this path if it exists. */ 292 unlink(path); 293 294 err = bpf_link__pin(link, path); 295 if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err)) 296 goto free_link; 297 298 err = do_read(path, "abcd"); 299 if (err) 300 goto unlink_path; 301 302 /* file based iterator seems working fine. Let us a link update 303 * of the underlying link and `cat` the iterator again, its content 304 * should change. 305 */ 306 skel2 = bpf_iter_test_kern2__open_and_load(); 307 if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", 308 "skeleton open_and_load failed\n")) 309 goto unlink_path; 310 311 err = bpf_link__update_program(link, skel2->progs.dump_task); 312 if (CHECK(err, "update_prog", "update_prog failed\n")) 313 goto destroy_skel2; 314 315 do_read(path, "ABCD"); 316 317 destroy_skel2: 318 bpf_iter_test_kern2__destroy(skel2); 319 unlink_path: 320 unlink(path); 321 free_link: 322 bpf_link__destroy(link); 323 out: 324 bpf_iter_test_kern1__destroy(skel1); 325 } 326 327 static void test_overflow(bool test_e2big_overflow, bool ret1) 328 { 329 __u32 map_info_len, total_read_len, expected_read_len; 330 int err, iter_fd, map1_fd, map2_fd, len; 331 struct bpf_map_info map_info = {}; 332 struct bpf_iter_test_kern4 *skel; 333 struct bpf_link *link; 334 __u32 page_size; 335 char *buf; 336 337 skel = bpf_iter_test_kern4__open(); 338 if (CHECK(!skel, "bpf_iter_test_kern4__open", 339 "skeleton open failed\n")) 340 return; 341 342 /* create two maps: bpf program will only do bpf_seq_write 343 * for these two maps. The goal is one map output almost 344 * fills seq_file buffer and then the other will trigger 345 * overflow and needs restart. 346 */ 347 map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 348 if (CHECK(map1_fd < 0, "bpf_create_map", 349 "map_creation failed: %s\n", strerror(errno))) 350 goto out; 351 map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0); 352 if (CHECK(map2_fd < 0, "bpf_create_map", 353 "map_creation failed: %s\n", strerror(errno))) 354 goto free_map1; 355 356 /* bpf_seq_printf kernel buffer is one page, so one map 357 * bpf_seq_write will mostly fill it, and the other map 358 * will partially fill and then trigger overflow and need 359 * bpf_seq_read restart. 360 */ 361 page_size = sysconf(_SC_PAGE_SIZE); 362 363 if (test_e2big_overflow) { 364 skel->rodata->print_len = (page_size + 8) / 8; 365 expected_read_len = 2 * (page_size + 8); 366 } else if (!ret1) { 367 skel->rodata->print_len = (page_size - 8) / 8; 368 expected_read_len = 2 * (page_size - 8); 369 } else { 370 skel->rodata->print_len = 1; 371 expected_read_len = 2 * 8; 372 } 373 skel->rodata->ret1 = ret1; 374 375 if (CHECK(bpf_iter_test_kern4__load(skel), 376 "bpf_iter_test_kern4__load", "skeleton load failed\n")) 377 goto free_map2; 378 379 /* setup filtering map_id in bpf program */ 380 map_info_len = sizeof(map_info); 381 err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len); 382 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 383 strerror(errno))) 384 goto free_map2; 385 skel->bss->map1_id = map_info.id; 386 387 err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len); 388 if (CHECK(err, "get_map_info", "get map info failed: %s\n", 389 strerror(errno))) 390 goto free_map2; 391 skel->bss->map2_id = map_info.id; 392 393 link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL); 394 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 395 goto free_map2; 396 397 iter_fd = bpf_iter_create(bpf_link__fd(link)); 398 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 399 goto free_link; 400 401 buf = malloc(expected_read_len); 402 if (!buf) 403 goto close_iter; 404 405 /* do read */ 406 total_read_len = 0; 407 if (test_e2big_overflow) { 408 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 409 total_read_len += len; 410 411 CHECK(len != -1 || errno != E2BIG, "read", 412 "expected ret -1, errno E2BIG, but get ret %d, error %s\n", 413 len, strerror(errno)); 414 goto free_buf; 415 } else if (!ret1) { 416 while ((len = read(iter_fd, buf, expected_read_len)) > 0) 417 total_read_len += len; 418 419 if (CHECK(len < 0, "read", "read failed: %s\n", 420 strerror(errno))) 421 goto free_buf; 422 } else { 423 do { 424 len = read(iter_fd, buf, expected_read_len); 425 if (len > 0) 426 total_read_len += len; 427 } while (len > 0 || len == -EAGAIN); 428 429 if (CHECK(len < 0, "read", "read failed: %s\n", 430 strerror(errno))) 431 goto free_buf; 432 } 433 434 if (CHECK(total_read_len != expected_read_len, "read", 435 "total len %u, expected len %u\n", total_read_len, 436 expected_read_len)) 437 goto free_buf; 438 439 if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", 440 "expected 1 actual %d\n", skel->bss->map1_accessed)) 441 goto free_buf; 442 443 if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", 444 "expected 2 actual %d\n", skel->bss->map2_accessed)) 445 goto free_buf; 446 447 CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, 448 "map2_seqnum", "two different seqnum %lld %lld\n", 449 skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); 450 451 free_buf: 452 free(buf); 453 close_iter: 454 close(iter_fd); 455 free_link: 456 bpf_link__destroy(link); 457 free_map2: 458 close(map2_fd); 459 free_map1: 460 close(map1_fd); 461 out: 462 bpf_iter_test_kern4__destroy(skel); 463 } 464 465 static void test_bpf_hash_map(void) 466 { 467 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 468 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 469 struct bpf_iter_bpf_hash_map *skel; 470 int err, i, len, map_fd, iter_fd; 471 __u64 val, expected_val = 0; 472 struct bpf_link *link; 473 struct key_t { 474 int a; 475 int b; 476 int c; 477 } key; 478 char buf[64]; 479 480 skel = bpf_iter_bpf_hash_map__open(); 481 if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", 482 "skeleton open failed\n")) 483 return; 484 485 skel->bss->in_test_mode = true; 486 487 err = bpf_iter_bpf_hash_map__load(skel); 488 if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", 489 "skeleton load failed\n")) 490 goto out; 491 492 /* iterator with hashmap2 and hashmap3 should fail */ 493 opts.map_fd = bpf_map__fd(skel->maps.hashmap2); 494 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 495 if (CHECK(!IS_ERR(link), "attach_iter", 496 "attach_iter for hashmap2 unexpected succeeded\n")) 497 goto out; 498 499 opts.map_fd = bpf_map__fd(skel->maps.hashmap3); 500 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 501 if (CHECK(!IS_ERR(link), "attach_iter", 502 "attach_iter for hashmap3 unexpected succeeded\n")) 503 goto out; 504 505 /* hashmap1 should be good, update map values here */ 506 map_fd = bpf_map__fd(skel->maps.hashmap1); 507 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 508 key.a = i + 1; 509 key.b = i + 2; 510 key.c = i + 3; 511 val = i + 4; 512 expected_key_a += key.a; 513 expected_key_b += key.b; 514 expected_key_c += key.c; 515 expected_val += val; 516 517 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); 518 if (CHECK(err, "map_update", "map_update failed\n")) 519 goto out; 520 } 521 522 opts.map_fd = map_fd; 523 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 524 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 525 goto out; 526 527 iter_fd = bpf_iter_create(bpf_link__fd(link)); 528 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 529 goto free_link; 530 531 /* do some tests */ 532 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 533 ; 534 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 535 goto close_iter; 536 537 /* test results */ 538 if (CHECK(skel->bss->key_sum_a != expected_key_a, 539 "key_sum_a", "got %u expected %u\n", 540 skel->bss->key_sum_a, expected_key_a)) 541 goto close_iter; 542 if (CHECK(skel->bss->key_sum_b != expected_key_b, 543 "key_sum_b", "got %u expected %u\n", 544 skel->bss->key_sum_b, expected_key_b)) 545 goto close_iter; 546 if (CHECK(skel->bss->val_sum != expected_val, 547 "val_sum", "got %llu expected %llu\n", 548 skel->bss->val_sum, expected_val)) 549 goto close_iter; 550 551 close_iter: 552 close(iter_fd); 553 free_link: 554 bpf_link__destroy(link); 555 out: 556 bpf_iter_bpf_hash_map__destroy(skel); 557 } 558 559 static void test_bpf_percpu_hash_map(void) 560 { 561 __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 562 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 563 struct bpf_iter_bpf_percpu_hash_map *skel; 564 int err, i, j, len, map_fd, iter_fd; 565 __u32 expected_val = 0; 566 struct bpf_link *link; 567 struct key_t { 568 int a; 569 int b; 570 int c; 571 } key; 572 char buf[64]; 573 void *val; 574 575 val = malloc(8 * bpf_num_possible_cpus()); 576 577 skel = bpf_iter_bpf_percpu_hash_map__open(); 578 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", 579 "skeleton open failed\n")) 580 return; 581 582 skel->rodata->num_cpus = bpf_num_possible_cpus(); 583 584 err = bpf_iter_bpf_percpu_hash_map__load(skel); 585 if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", 586 "skeleton load failed\n")) 587 goto out; 588 589 /* update map values here */ 590 map_fd = bpf_map__fd(skel->maps.hashmap1); 591 for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { 592 key.a = i + 1; 593 key.b = i + 2; 594 key.c = i + 3; 595 expected_key_a += key.a; 596 expected_key_b += key.b; 597 expected_key_c += key.c; 598 599 for (j = 0; j < bpf_num_possible_cpus(); j++) { 600 *(__u32 *)(val + j * 8) = i + j; 601 expected_val += i + j; 602 } 603 604 err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); 605 if (CHECK(err, "map_update", "map_update failed\n")) 606 goto out; 607 } 608 609 opts.map_fd = map_fd; 610 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); 611 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 612 goto out; 613 614 iter_fd = bpf_iter_create(bpf_link__fd(link)); 615 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 616 goto free_link; 617 618 /* do some tests */ 619 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 620 ; 621 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 622 goto close_iter; 623 624 /* test results */ 625 if (CHECK(skel->bss->key_sum_a != expected_key_a, 626 "key_sum_a", "got %u expected %u\n", 627 skel->bss->key_sum_a, expected_key_a)) 628 goto close_iter; 629 if (CHECK(skel->bss->key_sum_b != expected_key_b, 630 "key_sum_b", "got %u expected %u\n", 631 skel->bss->key_sum_b, expected_key_b)) 632 goto close_iter; 633 if (CHECK(skel->bss->val_sum != expected_val, 634 "val_sum", "got %u expected %u\n", 635 skel->bss->val_sum, expected_val)) 636 goto close_iter; 637 638 close_iter: 639 close(iter_fd); 640 free_link: 641 bpf_link__destroy(link); 642 out: 643 bpf_iter_bpf_percpu_hash_map__destroy(skel); 644 } 645 646 static void test_bpf_array_map(void) 647 { 648 __u64 val, expected_val = 0, res_first_val, first_val = 0; 649 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 650 __u32 expected_key = 0, res_first_key; 651 struct bpf_iter_bpf_array_map *skel; 652 int err, i, map_fd, iter_fd; 653 struct bpf_link *link; 654 char buf[64] = {}; 655 int len, start; 656 657 skel = bpf_iter_bpf_array_map__open_and_load(); 658 if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", 659 "skeleton open_and_load failed\n")) 660 return; 661 662 map_fd = bpf_map__fd(skel->maps.arraymap1); 663 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 664 val = i + 4; 665 expected_key += i; 666 expected_val += val; 667 668 if (i == 0) 669 first_val = val; 670 671 err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); 672 if (CHECK(err, "map_update", "map_update failed\n")) 673 goto out; 674 } 675 676 opts.map_fd = map_fd; 677 link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); 678 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 679 goto out; 680 681 iter_fd = bpf_iter_create(bpf_link__fd(link)); 682 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 683 goto free_link; 684 685 /* do some tests */ 686 start = 0; 687 while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) 688 start += len; 689 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 690 goto close_iter; 691 692 /* test results */ 693 res_first_key = *(__u32 *)buf; 694 res_first_val = *(__u64 *)(buf + sizeof(__u32)); 695 if (CHECK(res_first_key != 0 || res_first_val != first_val, 696 "bpf_seq_write", 697 "seq_write failure: first key %u vs expected 0, " 698 " first value %llu vs expected %llu\n", 699 res_first_key, res_first_val, first_val)) 700 goto close_iter; 701 702 if (CHECK(skel->bss->key_sum != expected_key, 703 "key_sum", "got %u expected %u\n", 704 skel->bss->key_sum, expected_key)) 705 goto close_iter; 706 if (CHECK(skel->bss->val_sum != expected_val, 707 "val_sum", "got %llu expected %llu\n", 708 skel->bss->val_sum, expected_val)) 709 goto close_iter; 710 711 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 712 err = bpf_map_lookup_elem(map_fd, &i, &val); 713 if (CHECK(err, "map_lookup", "map_lookup failed\n")) 714 goto out; 715 if (CHECK(i != val, "invalid_val", 716 "got value %llu expected %u\n", val, i)) 717 goto out; 718 } 719 720 close_iter: 721 close(iter_fd); 722 free_link: 723 bpf_link__destroy(link); 724 out: 725 bpf_iter_bpf_array_map__destroy(skel); 726 } 727 728 static void test_bpf_percpu_array_map(void) 729 { 730 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 731 struct bpf_iter_bpf_percpu_array_map *skel; 732 __u32 expected_key = 0, expected_val = 0; 733 int err, i, j, map_fd, iter_fd; 734 struct bpf_link *link; 735 char buf[64]; 736 void *val; 737 int len; 738 739 val = malloc(8 * bpf_num_possible_cpus()); 740 741 skel = bpf_iter_bpf_percpu_array_map__open(); 742 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", 743 "skeleton open failed\n")) 744 return; 745 746 skel->rodata->num_cpus = bpf_num_possible_cpus(); 747 748 err = bpf_iter_bpf_percpu_array_map__load(skel); 749 if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", 750 "skeleton load failed\n")) 751 goto out; 752 753 /* update map values here */ 754 map_fd = bpf_map__fd(skel->maps.arraymap1); 755 for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { 756 expected_key += i; 757 758 for (j = 0; j < bpf_num_possible_cpus(); j++) { 759 *(__u32 *)(val + j * 8) = i + j; 760 expected_val += i + j; 761 } 762 763 err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); 764 if (CHECK(err, "map_update", "map_update failed\n")) 765 goto out; 766 } 767 768 opts.map_fd = map_fd; 769 link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); 770 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 771 goto out; 772 773 iter_fd = bpf_iter_create(bpf_link__fd(link)); 774 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 775 goto free_link; 776 777 /* do some tests */ 778 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 779 ; 780 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 781 goto close_iter; 782 783 /* test results */ 784 if (CHECK(skel->bss->key_sum != expected_key, 785 "key_sum", "got %u expected %u\n", 786 skel->bss->key_sum, expected_key)) 787 goto close_iter; 788 if (CHECK(skel->bss->val_sum != expected_val, 789 "val_sum", "got %u expected %u\n", 790 skel->bss->val_sum, expected_val)) 791 goto close_iter; 792 793 close_iter: 794 close(iter_fd); 795 free_link: 796 bpf_link__destroy(link); 797 out: 798 bpf_iter_bpf_percpu_array_map__destroy(skel); 799 } 800 801 static void test_bpf_sk_storage_map(void) 802 { 803 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 804 int err, i, len, map_fd, iter_fd, num_sockets; 805 struct bpf_iter_bpf_sk_storage_map *skel; 806 int sock_fd[3] = {-1, -1, -1}; 807 __u32 val, expected_val = 0; 808 struct bpf_link *link; 809 char buf[64]; 810 811 skel = bpf_iter_bpf_sk_storage_map__open_and_load(); 812 if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load", 813 "skeleton open_and_load failed\n")) 814 return; 815 816 map_fd = bpf_map__fd(skel->maps.sk_stg_map); 817 num_sockets = ARRAY_SIZE(sock_fd); 818 for (i = 0; i < num_sockets; i++) { 819 sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); 820 if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno)) 821 goto out; 822 823 val = i + 1; 824 expected_val += val; 825 826 err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, 827 BPF_NOEXIST); 828 if (CHECK(err, "map_update", "map_update failed\n")) 829 goto out; 830 } 831 832 opts.map_fd = map_fd; 833 link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts); 834 if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) 835 goto out; 836 837 iter_fd = bpf_iter_create(bpf_link__fd(link)); 838 if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) 839 goto free_link; 840 841 /* do some tests */ 842 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 843 ; 844 if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) 845 goto close_iter; 846 847 /* test results */ 848 if (CHECK(skel->bss->ipv6_sk_count != num_sockets, 849 "ipv6_sk_count", "got %u expected %u\n", 850 skel->bss->ipv6_sk_count, num_sockets)) 851 goto close_iter; 852 853 if (CHECK(skel->bss->val_sum != expected_val, 854 "val_sum", "got %u expected %u\n", 855 skel->bss->val_sum, expected_val)) 856 goto close_iter; 857 858 close_iter: 859 close(iter_fd); 860 free_link: 861 bpf_link__destroy(link); 862 out: 863 for (i = 0; i < num_sockets; i++) { 864 if (sock_fd[i] >= 0) 865 close(sock_fd[i]); 866 } 867 bpf_iter_bpf_sk_storage_map__destroy(skel); 868 } 869 870 static void test_rdonly_buf_out_of_bound(void) 871 { 872 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 873 struct bpf_iter_test_kern5 *skel; 874 struct bpf_link *link; 875 876 skel = bpf_iter_test_kern5__open_and_load(); 877 if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load", 878 "skeleton open_and_load failed\n")) 879 return; 880 881 opts.map_fd = bpf_map__fd(skel->maps.hashmap1); 882 link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); 883 if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n")) 884 bpf_link__destroy(link); 885 886 bpf_iter_test_kern5__destroy(skel); 887 } 888 889 static void test_buf_neg_offset(void) 890 { 891 struct bpf_iter_test_kern6 *skel; 892 893 skel = bpf_iter_test_kern6__open_and_load(); 894 if (CHECK(skel, "bpf_iter_test_kern6__open_and_load", 895 "skeleton open_and_load unexpected success\n")) 896 bpf_iter_test_kern6__destroy(skel); 897 } 898 899 void test_bpf_iter(void) 900 { 901 if (test__start_subtest("btf_id_or_null")) 902 test_btf_id_or_null(); 903 if (test__start_subtest("ipv6_route")) 904 test_ipv6_route(); 905 if (test__start_subtest("netlink")) 906 test_netlink(); 907 if (test__start_subtest("bpf_map")) 908 test_bpf_map(); 909 if (test__start_subtest("task")) 910 test_task(); 911 if (test__start_subtest("task_stack")) 912 test_task_stack(); 913 if (test__start_subtest("task_file")) 914 test_task_file(); 915 if (test__start_subtest("tcp4")) 916 test_tcp4(); 917 if (test__start_subtest("tcp6")) 918 test_tcp6(); 919 if (test__start_subtest("udp4")) 920 test_udp4(); 921 if (test__start_subtest("udp6")) 922 test_udp6(); 923 if (test__start_subtest("anon")) 924 test_anon_iter(false); 925 if (test__start_subtest("anon-read-one-char")) 926 test_anon_iter(true); 927 if (test__start_subtest("file")) 928 test_file_iter(); 929 if (test__start_subtest("overflow")) 930 test_overflow(false, false); 931 if (test__start_subtest("overflow-e2big")) 932 test_overflow(true, false); 933 if (test__start_subtest("prog-ret-1")) 934 test_overflow(false, true); 935 if (test__start_subtest("bpf_hash_map")) 936 test_bpf_hash_map(); 937 if (test__start_subtest("bpf_percpu_hash_map")) 938 test_bpf_percpu_hash_map(); 939 if (test__start_subtest("bpf_array_map")) 940 test_bpf_array_map(); 941 if (test__start_subtest("bpf_percpu_array_map")) 942 test_bpf_percpu_array_map(); 943 if (test__start_subtest("bpf_sk_storage_map")) 944 test_bpf_sk_storage_map(); 945 if (test__start_subtest("rdonly-buf-out-of-bound")) 946 test_rdonly_buf_out_of_bound(); 947 if (test__start_subtest("buf-neg-offset")) 948 test_buf_neg_offset(); 949 } 950