xref: /openbmc/linux/tools/testing/selftests/bpf/prog_tests/bpf_iter.c (revision 6614a3c3164a5df2b54abb0b3559f51041cf705b)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_task_vma.skel.h"
11 #include "bpf_iter_task_btf.skel.h"
12 #include "bpf_iter_tcp4.skel.h"
13 #include "bpf_iter_tcp6.skel.h"
14 #include "bpf_iter_udp4.skel.h"
15 #include "bpf_iter_udp6.skel.h"
16 #include "bpf_iter_unix.skel.h"
17 #include "bpf_iter_test_kern1.skel.h"
18 #include "bpf_iter_test_kern2.skel.h"
19 #include "bpf_iter_test_kern3.skel.h"
20 #include "bpf_iter_test_kern4.skel.h"
21 #include "bpf_iter_bpf_hash_map.skel.h"
22 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
23 #include "bpf_iter_bpf_array_map.skel.h"
24 #include "bpf_iter_bpf_percpu_array_map.skel.h"
25 #include "bpf_iter_bpf_sk_storage_helpers.skel.h"
26 #include "bpf_iter_bpf_sk_storage_map.skel.h"
27 #include "bpf_iter_test_kern5.skel.h"
28 #include "bpf_iter_test_kern6.skel.h"
29 #include "bpf_iter_bpf_link.skel.h"
30 #include "bpf_iter_ksym.skel.h"
31 
/* NOTE(review): the CHECK() macro used throughout this file appears to
 * reference a `duration` variable for its logging output — this
 * file-scope definition presumably satisfies that; confirm against
 * test_progs.h.
 */
static int duration;
33 
/* Loading bpf_iter_test_kern3 is expected to fail; on unexpected
 * success, clean up the skeleton before returning.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel =
		bpf_iter_test_kern3__open_and_load();

	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load"))
		bpf_iter_test_kern3__destroy(skel);
}
44 
/* Attach @prog as an iterator, create an iterator fd from the link and
 * drain it to EOF. The output is discarded; only a clean read matters.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	char drain[16] = {};
	struct bpf_link *link;
	int fd, nread;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(fd, 0, "create_iter"))
		goto out;

	/* not check contents, but ensure read() ends without error */
	for (;;) {
		nread = read(fd, drain, sizeof(drain));
		if (nread <= 0)
			break;
	}
	CHECK(nread < 0, "read", "read failed: %s\n", strerror(errno));

	close(fd);

out:
	bpf_link__destroy(link);
}
69 
/* Read from @fd into @buf until EOF, error, or @size bytes consumed.
 * Returns the number of bytes read on success, or the (negative)
 * result of the failing read() call.
 */
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int remaining = size;
	int nread;

	for (;;) {
		nread = read(fd, buf, remaining);
		if (nread <= 0)
			break;
		buf += nread;
		remaining -= nread;
	}

	if (nread < 0)
		return nread;
	return size - remaining;
}
85 
86 static void test_ipv6_route(void)
87 {
88 	struct bpf_iter_ipv6_route *skel;
89 
90 	skel = bpf_iter_ipv6_route__open_and_load();
91 	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
92 		return;
93 
94 	do_dummy_read(skel->progs.dump_ipv6_route);
95 
96 	bpf_iter_ipv6_route__destroy(skel);
97 }
98 
99 static void test_netlink(void)
100 {
101 	struct bpf_iter_netlink *skel;
102 
103 	skel = bpf_iter_netlink__open_and_load();
104 	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
105 		return;
106 
107 	do_dummy_read(skel->progs.dump_netlink);
108 
109 	bpf_iter_netlink__destroy(skel);
110 }
111 
112 static void test_bpf_map(void)
113 {
114 	struct bpf_iter_bpf_map *skel;
115 
116 	skel = bpf_iter_bpf_map__open_and_load();
117 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
118 		return;
119 
120 	do_dummy_read(skel->progs.dump_bpf_map);
121 
122 	bpf_iter_bpf_map__destroy(skel);
123 }
124 
125 static void test_task(void)
126 {
127 	struct bpf_iter_task *skel;
128 
129 	skel = bpf_iter_task__open_and_load();
130 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
131 		return;
132 
133 	do_dummy_read(skel->progs.dump_task);
134 
135 	bpf_iter_task__destroy(skel);
136 }
137 
138 static void test_task_sleepable(void)
139 {
140 	struct bpf_iter_task *skel;
141 
142 	skel = bpf_iter_task__open_and_load();
143 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
144 		return;
145 
146 	do_dummy_read(skel->progs.dump_task_sleepable);
147 
148 	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
149 		  "num_expected_failure_copy_from_user_task");
150 	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
151 		  "num_success_copy_from_user_task");
152 
153 	bpf_iter_task__destroy(skel);
154 }
155 
156 static void test_task_stack(void)
157 {
158 	struct bpf_iter_task_stack *skel;
159 
160 	skel = bpf_iter_task_stack__open_and_load();
161 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
162 		return;
163 
164 	do_dummy_read(skel->progs.dump_task_stack);
165 	do_dummy_read(skel->progs.get_task_user_stacks);
166 
167 	bpf_iter_task_stack__destroy(skel);
168 }
169 
/* Minimal pthread start routine: terminates the thread immediately,
 * propagating @arg as the thread's exit status (joined and checked in
 * test_task_file below).
 */
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}
174 
/* Run the task_file iterator restricted to this process while a second
 * thread exists, then check the program's failure counter stayed zero.
 */
static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	/* restrict the BPF program to this process's tgid */
	skel->bss->tgid = getpid();

	/* keep a second task alive while the iterator runs */
	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	/* the thread must have exited cleanly with a NULL status */
	if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join"))
		goto done;

	/* NOTE(review): count appears to be incremented by the BPF program
	 * on unexpected entries and must remain 0 — confirm against
	 * progs/bpf_iter_task_file.c.
	 */
	ASSERT_EQ(skel->bss->count, 0, "check_count");

done:
	bpf_iter_task_file__destroy(skel);
}
202 
/* Scratch buffer receiving the BTF-formatted task_struct dump produced
 * by do_btf_read() below.
 */
#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];
206 
207 static int do_btf_read(struct bpf_iter_task_btf *skel)
208 {
209 	struct bpf_program *prog = skel->progs.dump_task_struct;
210 	struct bpf_iter_task_btf__bss *bss = skel->bss;
211 	int iter_fd = -1, err;
212 	struct bpf_link *link;
213 	char *buf = taskbuf;
214 	int ret = 0;
215 
216 	link = bpf_program__attach_iter(prog, NULL);
217 	if (!ASSERT_OK_PTR(link, "attach_iter"))
218 		return ret;
219 
220 	iter_fd = bpf_iter_create(bpf_link__fd(link));
221 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
222 		goto free_link;
223 
224 	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
225 	if (bss->skip) {
226 		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
227 		ret = 1;
228 		test__skip();
229 		goto free_link;
230 	}
231 
232 	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
233 		goto free_link;
234 
235 	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
236 	      "check for btf representation of task_struct in iter data");
237 free_link:
238 	if (iter_fd > 0)
239 		close(iter_fd);
240 	bpf_link__destroy(link);
241 	return ret;
242 }
243 
/* Dump task_structs via BTF (do_btf_read) and verify at least one task
 * was visited without seq_file errors.
 */
static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	/* non-zero means failure or skip; either way stop here */
	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}
268 
269 static void test_tcp4(void)
270 {
271 	struct bpf_iter_tcp4 *skel;
272 
273 	skel = bpf_iter_tcp4__open_and_load();
274 	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
275 		return;
276 
277 	do_dummy_read(skel->progs.dump_tcp4);
278 
279 	bpf_iter_tcp4__destroy(skel);
280 }
281 
282 static void test_tcp6(void)
283 {
284 	struct bpf_iter_tcp6 *skel;
285 
286 	skel = bpf_iter_tcp6__open_and_load();
287 	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
288 		return;
289 
290 	do_dummy_read(skel->progs.dump_tcp6);
291 
292 	bpf_iter_tcp6__destroy(skel);
293 }
294 
295 static void test_udp4(void)
296 {
297 	struct bpf_iter_udp4 *skel;
298 
299 	skel = bpf_iter_udp4__open_and_load();
300 	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
301 		return;
302 
303 	do_dummy_read(skel->progs.dump_udp4);
304 
305 	bpf_iter_udp4__destroy(skel);
306 }
307 
308 static void test_udp6(void)
309 {
310 	struct bpf_iter_udp6 *skel;
311 
312 	skel = bpf_iter_udp6__open_and_load();
313 	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
314 		return;
315 
316 	do_dummy_read(skel->progs.dump_udp6);
317 
318 	bpf_iter_udp6__destroy(skel);
319 }
320 
321 static void test_unix(void)
322 {
323 	struct bpf_iter_unix *skel;
324 
325 	skel = bpf_iter_unix__open_and_load();
326 	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
327 		return;
328 
329 	do_dummy_read(skel->progs.dump_unix);
330 
331 	bpf_iter_unix__destroy(skel);
332 }
333 
/* Drain @iter_fd and compare the accumulated text with @expected, which
 * must be strictly shorter than 16 bytes. With @read_one_char set, data
 * is pulled one byte per read() call. Returns 0 on match, -1 on any
 * failure.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	char buf[16] = {};
	int pos = 0;
	int n;

	for (;;) {
		int chunk = read_one_char ? 1 : (int)sizeof(buf) - pos;

		n = read(iter_fd, buf + pos, chunk);
		if (n <= 0)
			break;
		pos += n;
		if (CHECK(pos >= 16, "read", "read len %d\n", n))
			return -1;
	}
	if (CHECK(n < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}
357 
358 static void test_anon_iter(bool read_one_char)
359 {
360 	struct bpf_iter_test_kern1 *skel;
361 	struct bpf_link *link;
362 	int iter_fd, err;
363 
364 	skel = bpf_iter_test_kern1__open_and_load();
365 	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
366 		return;
367 
368 	err = bpf_iter_test_kern1__attach(skel);
369 	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
370 		goto out;
371 	}
372 
373 	link = skel->links.dump_task;
374 	iter_fd = bpf_iter_create(bpf_link__fd(link));
375 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
376 		goto out;
377 
378 	do_read_with_fd(iter_fd, "abcd", read_one_char);
379 	close(iter_fd);
380 
381 out:
382 	bpf_iter_test_kern1__destroy(skel);
383 }
384 
385 static int do_read(const char *path, const char *expected)
386 {
387 	int err, iter_fd;
388 
389 	iter_fd = open(path, O_RDONLY);
390 	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
391 		  path, strerror(errno)))
392 		return -1;
393 
394 	err = do_read_with_fd(iter_fd, expected, false);
395 	close(iter_fd);
396 	return err;
397 }
398 
/* Pin a task iterator link in bpffs, read it through the pinned file,
 * then update the link to a second program and confirm the file's
 * content changes accordingly.
 */
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	/* kern1's program is expected to emit "abcd" */
	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* file based iterator seems working fine. Let us do a link update
	 * of the underlying link and `cat` the iterator again, its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	/* kern2's program is expected to emit "ABCD" */
	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
449 
450 static void test_overflow(bool test_e2big_overflow, bool ret1)
451 {
452 	__u32 map_info_len, total_read_len, expected_read_len;
453 	int err, iter_fd, map1_fd, map2_fd, len;
454 	struct bpf_map_info map_info = {};
455 	struct bpf_iter_test_kern4 *skel;
456 	struct bpf_link *link;
457 	__u32 iter_size;
458 	char *buf;
459 
460 	skel = bpf_iter_test_kern4__open();
461 	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
462 		return;
463 
464 	/* create two maps: bpf program will only do bpf_seq_write
465 	 * for these two maps. The goal is one map output almost
466 	 * fills seq_file buffer and then the other will trigger
467 	 * overflow and needs restart.
468 	 */
469 	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
470 	if (CHECK(map1_fd < 0, "bpf_map_create",
471 		  "map_creation failed: %s\n", strerror(errno)))
472 		goto out;
473 	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
474 	if (CHECK(map2_fd < 0, "bpf_map_create",
475 		  "map_creation failed: %s\n", strerror(errno)))
476 		goto free_map1;
477 
478 	/* bpf_seq_printf kernel buffer is 8 pages, so one map
479 	 * bpf_seq_write will mostly fill it, and the other map
480 	 * will partially fill and then trigger overflow and need
481 	 * bpf_seq_read restart.
482 	 */
483 	iter_size = sysconf(_SC_PAGE_SIZE) << 3;
484 
485 	if (test_e2big_overflow) {
486 		skel->rodata->print_len = (iter_size + 8) / 8;
487 		expected_read_len = 2 * (iter_size + 8);
488 	} else if (!ret1) {
489 		skel->rodata->print_len = (iter_size - 8) / 8;
490 		expected_read_len = 2 * (iter_size - 8);
491 	} else {
492 		skel->rodata->print_len = 1;
493 		expected_read_len = 2 * 8;
494 	}
495 	skel->rodata->ret1 = ret1;
496 
497 	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
498 		  "bpf_iter_test_kern4__load"))
499 		goto free_map2;
500 
501 	/* setup filtering map_id in bpf program */
502 	map_info_len = sizeof(map_info);
503 	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
504 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
505 		  strerror(errno)))
506 		goto free_map2;
507 	skel->bss->map1_id = map_info.id;
508 
509 	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
510 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
511 		  strerror(errno)))
512 		goto free_map2;
513 	skel->bss->map2_id = map_info.id;
514 
515 	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
516 	if (!ASSERT_OK_PTR(link, "attach_iter"))
517 		goto free_map2;
518 
519 	iter_fd = bpf_iter_create(bpf_link__fd(link));
520 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
521 		goto free_link;
522 
523 	buf = malloc(expected_read_len);
524 	if (!buf)
525 		goto close_iter;
526 
527 	/* do read */
528 	total_read_len = 0;
529 	if (test_e2big_overflow) {
530 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
531 			total_read_len += len;
532 
533 		CHECK(len != -1 || errno != E2BIG, "read",
534 		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
535 			  len, strerror(errno));
536 		goto free_buf;
537 	} else if (!ret1) {
538 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
539 			total_read_len += len;
540 
541 		if (CHECK(len < 0, "read", "read failed: %s\n",
542 			  strerror(errno)))
543 			goto free_buf;
544 	} else {
545 		do {
546 			len = read(iter_fd, buf, expected_read_len);
547 			if (len > 0)
548 				total_read_len += len;
549 		} while (len > 0 || len == -EAGAIN);
550 
551 		if (CHECK(len < 0, "read", "read failed: %s\n",
552 			  strerror(errno)))
553 			goto free_buf;
554 	}
555 
556 	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
557 		goto free_buf;
558 
559 	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
560 		goto free_buf;
561 
562 	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
563 		goto free_buf;
564 
565 	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");
566 
567 free_buf:
568 	free(buf);
569 close_iter:
570 	close(iter_fd);
571 free_link:
572 	bpf_link__destroy(link);
573 free_map2:
574 	close(map2_fd);
575 free_map1:
576 	close(map1_fd);
577 out:
578 	bpf_iter_test_kern4__destroy(skel);
579 }
580 
/* End-to-end test of the hash map iterator: attaching to hashmap2 and
 * hashmap3 is expected to fail, while iterating hashmap1 must produce
 * key/value sums matching what is inserted here.
 */
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator so the BPF program accumulates the sums */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}
667 
668 static void test_bpf_percpu_hash_map(void)
669 {
670 	__u32 expected_key_a = 0, expected_key_b = 0;
671 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
672 	struct bpf_iter_bpf_percpu_hash_map *skel;
673 	int err, i, j, len, map_fd, iter_fd;
674 	union bpf_iter_link_info linfo;
675 	__u32 expected_val = 0;
676 	struct bpf_link *link;
677 	struct key_t {
678 		int a;
679 		int b;
680 		int c;
681 	} key;
682 	char buf[64];
683 	void *val;
684 
685 	skel = bpf_iter_bpf_percpu_hash_map__open();
686 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
687 		return;
688 
689 	skel->rodata->num_cpus = bpf_num_possible_cpus();
690 	val = malloc(8 * bpf_num_possible_cpus());
691 
692 	err = bpf_iter_bpf_percpu_hash_map__load(skel);
693 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load"))
694 		goto out;
695 
696 	/* update map values here */
697 	map_fd = bpf_map__fd(skel->maps.hashmap1);
698 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
699 		key.a = i + 1;
700 		key.b = i + 2;
701 		key.c = i + 3;
702 		expected_key_a += key.a;
703 		expected_key_b += key.b;
704 
705 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
706 			*(__u32 *)(val + j * 8) = i + j;
707 			expected_val += i + j;
708 		}
709 
710 		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
711 		if (!ASSERT_OK(err, "map_update"))
712 			goto out;
713 	}
714 
715 	memset(&linfo, 0, sizeof(linfo));
716 	linfo.map.map_fd = map_fd;
717 	opts.link_info = &linfo;
718 	opts.link_info_len = sizeof(linfo);
719 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
720 	if (!ASSERT_OK_PTR(link, "attach_iter"))
721 		goto out;
722 
723 	iter_fd = bpf_iter_create(bpf_link__fd(link));
724 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
725 		goto free_link;
726 
727 	/* do some tests */
728 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
729 		;
730 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
731 		goto close_iter;
732 
733 	/* test results */
734 	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
735 		goto close_iter;
736 	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
737 		goto close_iter;
738 	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
739 		goto close_iter;
740 
741 close_iter:
742 	close(iter_fd);
743 free_link:
744 	bpf_link__destroy(link);
745 out:
746 	bpf_iter_bpf_percpu_hash_map__destroy(skel);
747 	free(val);
748 }
749 
750 static void test_bpf_array_map(void)
751 {
752 	__u64 val, expected_val = 0, res_first_val, first_val = 0;
753 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
754 	__u32 expected_key = 0, res_first_key;
755 	struct bpf_iter_bpf_array_map *skel;
756 	union bpf_iter_link_info linfo;
757 	int err, i, map_fd, iter_fd;
758 	struct bpf_link *link;
759 	char buf[64] = {};
760 	int len, start;
761 
762 	skel = bpf_iter_bpf_array_map__open_and_load();
763 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
764 		return;
765 
766 	map_fd = bpf_map__fd(skel->maps.arraymap1);
767 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
768 		val = i + 4;
769 		expected_key += i;
770 		expected_val += val;
771 
772 		if (i == 0)
773 			first_val = val;
774 
775 		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
776 		if (!ASSERT_OK(err, "map_update"))
777 			goto out;
778 	}
779 
780 	memset(&linfo, 0, sizeof(linfo));
781 	linfo.map.map_fd = map_fd;
782 	opts.link_info = &linfo;
783 	opts.link_info_len = sizeof(linfo);
784 	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
785 	if (!ASSERT_OK_PTR(link, "attach_iter"))
786 		goto out;
787 
788 	iter_fd = bpf_iter_create(bpf_link__fd(link));
789 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
790 		goto free_link;
791 
792 	/* do some tests */
793 	start = 0;
794 	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
795 		start += len;
796 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
797 		goto close_iter;
798 
799 	/* test results */
800 	res_first_key = *(__u32 *)buf;
801 	res_first_val = *(__u64 *)(buf + sizeof(__u32));
802 	if (CHECK(res_first_key != 0 || res_first_val != first_val,
803 		  "bpf_seq_write",
804 		  "seq_write failure: first key %u vs expected 0, "
805 		  " first value %llu vs expected %llu\n",
806 		  res_first_key, res_first_val, first_val))
807 		goto close_iter;
808 
809 	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
810 		goto close_iter;
811 	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
812 		goto close_iter;
813 
814 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
815 		err = bpf_map_lookup_elem(map_fd, &i, &val);
816 		if (!ASSERT_OK(err, "map_lookup"))
817 			goto out;
818 		if (!ASSERT_EQ(i, val, "invalid_val"))
819 			goto out;
820 	}
821 
822 close_iter:
823 	close(iter_fd);
824 free_link:
825 	bpf_link__destroy(link);
826 out:
827 	bpf_iter_bpf_array_map__destroy(skel);
828 }
829 
830 static void test_bpf_percpu_array_map(void)
831 {
832 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
833 	struct bpf_iter_bpf_percpu_array_map *skel;
834 	__u32 expected_key = 0, expected_val = 0;
835 	union bpf_iter_link_info linfo;
836 	int err, i, j, map_fd, iter_fd;
837 	struct bpf_link *link;
838 	char buf[64];
839 	void *val;
840 	int len;
841 
842 	skel = bpf_iter_bpf_percpu_array_map__open();
843 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
844 		return;
845 
846 	skel->rodata->num_cpus = bpf_num_possible_cpus();
847 	val = malloc(8 * bpf_num_possible_cpus());
848 
849 	err = bpf_iter_bpf_percpu_array_map__load(skel);
850 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load"))
851 		goto out;
852 
853 	/* update map values here */
854 	map_fd = bpf_map__fd(skel->maps.arraymap1);
855 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
856 		expected_key += i;
857 
858 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
859 			*(__u32 *)(val + j * 8) = i + j;
860 			expected_val += i + j;
861 		}
862 
863 		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
864 		if (!ASSERT_OK(err, "map_update"))
865 			goto out;
866 	}
867 
868 	memset(&linfo, 0, sizeof(linfo));
869 	linfo.map.map_fd = map_fd;
870 	opts.link_info = &linfo;
871 	opts.link_info_len = sizeof(linfo);
872 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
873 	if (!ASSERT_OK_PTR(link, "attach_iter"))
874 		goto out;
875 
876 	iter_fd = bpf_iter_create(bpf_link__fd(link));
877 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
878 		goto free_link;
879 
880 	/* do some tests */
881 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
882 		;
883 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
884 		goto close_iter;
885 
886 	/* test results */
887 	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
888 		goto close_iter;
889 	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
890 		goto close_iter;
891 
892 close_iter:
893 	close(iter_fd);
894 free_link:
895 	bpf_link__destroy(link);
896 out:
897 	bpf_iter_bpf_percpu_array_map__destroy(skel);
898 	free(val);
899 }
900 
/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	/* seed one socket with a storage value for the iterator to delete */
	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator; this is what runs the deleting program */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* the storage entry must now be gone: lookup fails with ENOENT */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
960 
/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of the
 * only task owning a file descriptor to this socket, this process, prog_tests.
 * It then runs a tcp socket iterator that negates the value in the existing
 * socket local storage, the test verifies that the resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	/* listen so the tcp iterator below can see this socket */
	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	/* seed the storage with a sentinel (-1) to be overwritten */
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	/* task iterator: stores the owning tgid into the socket storage */
	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	    getpid(), val, err))
		goto close_socket;

	/* tcp iterator: negates the stored value in place */
	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
1011 
/* Attach the sk_storage map iterator to a map seeded from several IPv6
 * sockets and verify the BPF program saw every socket and summed the
 * stored values correctly.
 */
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	/* one storage entry (value i+1) per socket */
	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator so the BPF program accumulates its counters */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}
1079 
1080 static void test_rdonly_buf_out_of_bound(void)
1081 {
1082 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1083 	struct bpf_iter_test_kern5 *skel;
1084 	union bpf_iter_link_info linfo;
1085 	struct bpf_link *link;
1086 
1087 	skel = bpf_iter_test_kern5__open_and_load();
1088 	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
1089 		return;
1090 
1091 	memset(&linfo, 0, sizeof(linfo));
1092 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
1093 	opts.link_info = &linfo;
1094 	opts.link_info_len = sizeof(linfo);
1095 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
1096 	if (!ASSERT_ERR_PTR(link, "attach_iter"))
1097 		bpf_link__destroy(link);
1098 
1099 	bpf_iter_test_kern5__destroy(skel);
1100 }
1101 
/* A program using a negative buffer offset must fail to load. */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	/* Load is expected to fail; clean up only if it did not. */
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}
1110 
1111 static void test_link_iter(void)
1112 {
1113 	struct bpf_iter_bpf_link *skel;
1114 
1115 	skel = bpf_iter_bpf_link__open_and_load();
1116 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
1117 		return;
1118 
1119 	do_dummy_read(skel->progs.dump_bpf_link);
1120 
1121 	bpf_iter_bpf_link__destroy(skel);
1122 }
1123 
1124 static void test_ksym_iter(void)
1125 {
1126 	struct bpf_iter_ksym *skel;
1127 
1128 	skel = bpf_iter_ksym__open_and_load();
1129 	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
1130 		return;
1131 
1132 	do_dummy_read(skel->progs.dump_ksym);
1133 
1134 	bpf_iter_ksym__destroy(skel);
1135 }
1136 
#define CMP_BUFFER_SIZE 1024
/* Buffers filled by test_task_vma(): the first 1kB of bpf_iter output and
 * of /proc/<pid>/maps, whose stripped first lines are then compared.
 */
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];
1140 
/* Remove spaces and tabs from str, and only keep the first line.
 * (The old comment said "\0 and \t"; the code strips ' ' and '\t'.)
 */
static void str_strip_first_line(char *str)
{
	char *dst = str;
	const char *src = str;

	/* Check the terminator BEFORE consuming a character: the previous
	 * do-while tested it only afterwards, so an empty string copied its
	 * '\0' and then read one byte past it, and a leading '\n' was copied
	 * instead of ending the first line.
	 */
	while (*src != '\0' && *src != '\n') {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);
	}

	*dst = '\0';
}
1156 
1157 static void test_task_vma(void)
1158 {
1159 	int err, iter_fd = -1, proc_maps_fd = -1;
1160 	struct bpf_iter_task_vma *skel;
1161 	int len, read_size = 4;
1162 	char maps_path[64];
1163 
1164 	skel = bpf_iter_task_vma__open();
1165 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
1166 		return;
1167 
1168 	skel->bss->pid = getpid();
1169 
1170 	err = bpf_iter_task_vma__load(skel);
1171 	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
1172 		goto out;
1173 
1174 	skel->links.proc_maps = bpf_program__attach_iter(
1175 		skel->progs.proc_maps, NULL);
1176 
1177 	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
1178 		skel->links.proc_maps = NULL;
1179 		goto out;
1180 	}
1181 
1182 	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
1183 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
1184 		goto out;
1185 
1186 	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
1187 	 * to trigger seq_file corner cases.
1188 	 */
1189 	len = 0;
1190 	while (len < CMP_BUFFER_SIZE) {
1191 		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
1192 					  MIN(read_size, CMP_BUFFER_SIZE - len));
1193 		if (!err)
1194 			break;
1195 		if (!ASSERT_GE(err, 0, "read_iter_fd"))
1196 			goto out;
1197 		len += err;
1198 	}
1199 
1200 	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
1201 	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
1202 	proc_maps_fd = open(maps_path, O_RDONLY);
1203 	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
1204 		goto out;
1205 	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
1206 	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
1207 		goto out;
1208 
1209 	/* strip and compare the first line of the two files */
1210 	str_strip_first_line(task_vma_output);
1211 	str_strip_first_line(proc_maps_output);
1212 
1213 	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
1214 out:
1215 	close(proc_maps_fd);
1216 	close(iter_fd);
1217 	bpf_iter_task_vma__destroy(skel);
1218 }
1219 
1220 void test_bpf_iter(void)
1221 {
1222 	if (test__start_subtest("btf_id_or_null"))
1223 		test_btf_id_or_null();
1224 	if (test__start_subtest("ipv6_route"))
1225 		test_ipv6_route();
1226 	if (test__start_subtest("netlink"))
1227 		test_netlink();
1228 	if (test__start_subtest("bpf_map"))
1229 		test_bpf_map();
1230 	if (test__start_subtest("task"))
1231 		test_task();
1232 	if (test__start_subtest("task_sleepable"))
1233 		test_task_sleepable();
1234 	if (test__start_subtest("task_stack"))
1235 		test_task_stack();
1236 	if (test__start_subtest("task_file"))
1237 		test_task_file();
1238 	if (test__start_subtest("task_vma"))
1239 		test_task_vma();
1240 	if (test__start_subtest("task_btf"))
1241 		test_task_btf();
1242 	if (test__start_subtest("tcp4"))
1243 		test_tcp4();
1244 	if (test__start_subtest("tcp6"))
1245 		test_tcp6();
1246 	if (test__start_subtest("udp4"))
1247 		test_udp4();
1248 	if (test__start_subtest("udp6"))
1249 		test_udp6();
1250 	if (test__start_subtest("unix"))
1251 		test_unix();
1252 	if (test__start_subtest("anon"))
1253 		test_anon_iter(false);
1254 	if (test__start_subtest("anon-read-one-char"))
1255 		test_anon_iter(true);
1256 	if (test__start_subtest("file"))
1257 		test_file_iter();
1258 	if (test__start_subtest("overflow"))
1259 		test_overflow(false, false);
1260 	if (test__start_subtest("overflow-e2big"))
1261 		test_overflow(true, false);
1262 	if (test__start_subtest("prog-ret-1"))
1263 		test_overflow(false, true);
1264 	if (test__start_subtest("bpf_hash_map"))
1265 		test_bpf_hash_map();
1266 	if (test__start_subtest("bpf_percpu_hash_map"))
1267 		test_bpf_percpu_hash_map();
1268 	if (test__start_subtest("bpf_array_map"))
1269 		test_bpf_array_map();
1270 	if (test__start_subtest("bpf_percpu_array_map"))
1271 		test_bpf_percpu_array_map();
1272 	if (test__start_subtest("bpf_sk_storage_map"))
1273 		test_bpf_sk_storage_map();
1274 	if (test__start_subtest("bpf_sk_storage_delete"))
1275 		test_bpf_sk_storage_delete();
1276 	if (test__start_subtest("bpf_sk_storage_get"))
1277 		test_bpf_sk_storage_get();
1278 	if (test__start_subtest("rdonly-buf-out-of-bound"))
1279 		test_rdonly_buf_out_of_bound();
1280 	if (test__start_subtest("buf-neg-offset"))
1281 		test_buf_neg_offset();
1282 	if (test__start_subtest("link-iter"))
1283 		test_link_iter();
1284 	if (test__start_subtest("ksym"))
1285 		test_ksym_iter();
1286 }
1287