1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_task_vma.skel.h"
11 #include "bpf_iter_task_btf.skel.h"
12 #include "bpf_iter_tcp4.skel.h"
13 #include "bpf_iter_tcp6.skel.h"
14 #include "bpf_iter_udp4.skel.h"
15 #include "bpf_iter_udp6.skel.h"
16 #include "bpf_iter_unix.skel.h"
17 #include "bpf_iter_test_kern1.skel.h"
18 #include "bpf_iter_test_kern2.skel.h"
19 #include "bpf_iter_test_kern3.skel.h"
20 #include "bpf_iter_test_kern4.skel.h"
21 #include "bpf_iter_bpf_hash_map.skel.h"
22 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
23 #include "bpf_iter_bpf_array_map.skel.h"
24 #include "bpf_iter_bpf_percpu_array_map.skel.h"
25 #include "bpf_iter_bpf_sk_storage_helpers.skel.h"
26 #include "bpf_iter_bpf_sk_storage_map.skel.h"
27 #include "bpf_iter_test_kern5.skel.h"
28 #include "bpf_iter_test_kern6.skel.h"
29 
30 static int duration;
31 
/* Negative test: the bpf_iter_test_kern3 skeleton is expected to be
 * rejected at load time (presumably exercising the btf_id_or_null ctx
 * argument handling -- see progs/bpf_iter_test_kern3.c to confirm).
 * Note the inverted condition: CHECK() fires when open_and_load
 * unexpectedly SUCCEEDS, in which case we clean up the skeleton.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}
43 
44 static void do_dummy_read(struct bpf_program *prog)
45 {
46 	struct bpf_link *link;
47 	char buf[16] = {};
48 	int iter_fd, len;
49 
50 	link = bpf_program__attach_iter(prog, NULL);
51 	if (!ASSERT_OK_PTR(link, "attach_iter"))
52 		return;
53 
54 	iter_fd = bpf_iter_create(bpf_link__fd(link));
55 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
56 		goto free_link;
57 
58 	/* not check contents, but ensure read() ends without error */
59 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
60 		;
61 	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
62 
63 	close(iter_fd);
64 
65 free_link:
66 	bpf_link__destroy(link);
67 }
68 
/* Read from @fd into @buf until EOF, error, or @size bytes consumed.
 * Returns the number of bytes read on success, or the (negative) final
 * read() return value on error.
 */
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int remaining = size;
	int n;

	for (;;) {
		n = read(fd, buf, remaining);
		if (n <= 0)
			break;
		buf += n;
		remaining -= n;
	}

	return n < 0 ? n : size - remaining;
}
84 
85 static void test_ipv6_route(void)
86 {
87 	struct bpf_iter_ipv6_route *skel;
88 
89 	skel = bpf_iter_ipv6_route__open_and_load();
90 	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
91 		  "skeleton open_and_load failed\n"))
92 		return;
93 
94 	do_dummy_read(skel->progs.dump_ipv6_route);
95 
96 	bpf_iter_ipv6_route__destroy(skel);
97 }
98 
99 static void test_netlink(void)
100 {
101 	struct bpf_iter_netlink *skel;
102 
103 	skel = bpf_iter_netlink__open_and_load();
104 	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
105 		  "skeleton open_and_load failed\n"))
106 		return;
107 
108 	do_dummy_read(skel->progs.dump_netlink);
109 
110 	bpf_iter_netlink__destroy(skel);
111 }
112 
113 static void test_bpf_map(void)
114 {
115 	struct bpf_iter_bpf_map *skel;
116 
117 	skel = bpf_iter_bpf_map__open_and_load();
118 	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
119 		  "skeleton open_and_load failed\n"))
120 		return;
121 
122 	do_dummy_read(skel->progs.dump_bpf_map);
123 
124 	bpf_iter_bpf_map__destroy(skel);
125 }
126 
127 static void test_task(void)
128 {
129 	struct bpf_iter_task *skel;
130 
131 	skel = bpf_iter_task__open_and_load();
132 	if (CHECK(!skel, "bpf_iter_task__open_and_load",
133 		  "skeleton open_and_load failed\n"))
134 		return;
135 
136 	do_dummy_read(skel->progs.dump_task);
137 
138 	bpf_iter_task__destroy(skel);
139 }
140 
141 static void test_task_sleepable(void)
142 {
143 	struct bpf_iter_task *skel;
144 
145 	skel = bpf_iter_task__open_and_load();
146 	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
147 		return;
148 
149 	do_dummy_read(skel->progs.dump_task_sleepable);
150 
151 	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
152 		  "num_expected_failure_copy_from_user_task");
153 	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
154 		  "num_success_copy_from_user_task");
155 
156 	bpf_iter_task__destroy(skel);
157 }
158 
159 static void test_task_stack(void)
160 {
161 	struct bpf_iter_task_stack *skel;
162 
163 	skel = bpf_iter_task_stack__open_and_load();
164 	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
165 		  "skeleton open_and_load failed\n"))
166 		return;
167 
168 	do_dummy_read(skel->progs.dump_task_stack);
169 	do_dummy_read(skel->progs.get_task_user_stacks);
170 
171 	bpf_iter_task_stack__destroy(skel);
172 }
173 
/* Thread start routine that exits immediately, passing @arg through as
 * the thread's exit status (collected by pthread_join in test_task_file).
 */
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}
178 
/* Iterate task files while a short-lived pthread coexists with the main
 * thread.  bss->tgid tells the BPF program which process to look at;
 * bss->count is maintained by the BPF side and must end up 0 -- it
 * presumably counts file visits attributed to the wrong task; confirm
 * the exact semantics against progs/bpf_iter_task_file.c.
 */
static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	/* restrict the BPF program to this process */
	skel->bss->tgid = getpid();

	/* spawn a second thread so the iterator sees more than one task */
	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create", "pthread_create failed\n"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	/* do_nothing exits with its NULL argument; anything else is a failure */
	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join", "pthread_join failed\n"))
		goto done;

	CHECK(skel->bss->count != 0, "check_count",
	      "invalid non pthread file visit count %d\n", skel->bss->count);

done:
	bpf_iter_task_file__destroy(skel);
}
208 
209 #define TASKBUFSZ		32768
210 
211 static char taskbuf[TASKBUFSZ];
212 
213 static int do_btf_read(struct bpf_iter_task_btf *skel)
214 {
215 	struct bpf_program *prog = skel->progs.dump_task_struct;
216 	struct bpf_iter_task_btf__bss *bss = skel->bss;
217 	int iter_fd = -1, err;
218 	struct bpf_link *link;
219 	char *buf = taskbuf;
220 	int ret = 0;
221 
222 	link = bpf_program__attach_iter(prog, NULL);
223 	if (!ASSERT_OK_PTR(link, "attach_iter"))
224 		return ret;
225 
226 	iter_fd = bpf_iter_create(bpf_link__fd(link));
227 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
228 		goto free_link;
229 
230 	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
231 	if (bss->skip) {
232 		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
233 		ret = 1;
234 		test__skip();
235 		goto free_link;
236 	}
237 
238 	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
239 		goto free_link;
240 
241 	CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
242 	      "check for btf representation of task_struct in iter data",
243 	      "struct task_struct not found");
244 free_link:
245 	if (iter_fd > 0)
246 		close(iter_fd);
247 	bpf_link__destroy(link);
248 	return ret;
249 }
250 
251 static void test_task_btf(void)
252 {
253 	struct bpf_iter_task_btf__bss *bss;
254 	struct bpf_iter_task_btf *skel;
255 	int ret;
256 
257 	skel = bpf_iter_task_btf__open_and_load();
258 	if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
259 		  "skeleton open_and_load failed\n"))
260 		return;
261 
262 	bss = skel->bss;
263 
264 	ret = do_btf_read(skel);
265 	if (ret)
266 		goto cleanup;
267 
268 	if (CHECK(bss->tasks == 0, "check if iterated over tasks",
269 		  "no task iteration, did BPF program run?\n"))
270 		goto cleanup;
271 
272 	CHECK(bss->seq_err != 0, "check for unexpected err",
273 	      "bpf_seq_printf_btf returned %ld", bss->seq_err);
274 
275 cleanup:
276 	bpf_iter_task_btf__destroy(skel);
277 }
278 
279 static void test_tcp4(void)
280 {
281 	struct bpf_iter_tcp4 *skel;
282 
283 	skel = bpf_iter_tcp4__open_and_load();
284 	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
285 		  "skeleton open_and_load failed\n"))
286 		return;
287 
288 	do_dummy_read(skel->progs.dump_tcp4);
289 
290 	bpf_iter_tcp4__destroy(skel);
291 }
292 
293 static void test_tcp6(void)
294 {
295 	struct bpf_iter_tcp6 *skel;
296 
297 	skel = bpf_iter_tcp6__open_and_load();
298 	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
299 		  "skeleton open_and_load failed\n"))
300 		return;
301 
302 	do_dummy_read(skel->progs.dump_tcp6);
303 
304 	bpf_iter_tcp6__destroy(skel);
305 }
306 
307 static void test_udp4(void)
308 {
309 	struct bpf_iter_udp4 *skel;
310 
311 	skel = bpf_iter_udp4__open_and_load();
312 	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
313 		  "skeleton open_and_load failed\n"))
314 		return;
315 
316 	do_dummy_read(skel->progs.dump_udp4);
317 
318 	bpf_iter_udp4__destroy(skel);
319 }
320 
321 static void test_udp6(void)
322 {
323 	struct bpf_iter_udp6 *skel;
324 
325 	skel = bpf_iter_udp6__open_and_load();
326 	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
327 		  "skeleton open_and_load failed\n"))
328 		return;
329 
330 	do_dummy_read(skel->progs.dump_udp6);
331 
332 	bpf_iter_udp6__destroy(skel);
333 }
334 
335 static void test_unix(void)
336 {
337 	struct bpf_iter_unix *skel;
338 
339 	skel = bpf_iter_unix__open_and_load();
340 	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
341 		return;
342 
343 	do_dummy_read(skel->progs.dump_unix);
344 
345 	bpf_iter_unix__destroy(skel);
346 }
347 
/* Read the whole iterator output from @iter_fd into a 16-byte buffer and
 * compare it with @expected (which is less than 16 bytes).  When
 * @read_one_char is set, the data is consumed one byte per read() call
 * to exercise partial reads.  Returns 0 on match, -1 on any failure.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int err = -1, len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		/* accumulating >= 16 bytes would overflow buf and cannot
		 * match an expected string shorter than 16 bytes anyway
		 */
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		/* shrink the next request so we never write past buf[15] */
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	/* buf was zero-initialized, so it is NUL-terminated here */
	err = strcmp(buf, expected);
	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}
373 
374 static void test_anon_iter(bool read_one_char)
375 {
376 	struct bpf_iter_test_kern1 *skel;
377 	struct bpf_link *link;
378 	int iter_fd, err;
379 
380 	skel = bpf_iter_test_kern1__open_and_load();
381 	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
382 		  "skeleton open_and_load failed\n"))
383 		return;
384 
385 	err = bpf_iter_test_kern1__attach(skel);
386 	if (CHECK(err, "bpf_iter_test_kern1__attach",
387 		  "skeleton attach failed\n")) {
388 		goto out;
389 	}
390 
391 	link = skel->links.dump_task;
392 	iter_fd = bpf_iter_create(bpf_link__fd(link));
393 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
394 		goto out;
395 
396 	do_read_with_fd(iter_fd, "abcd", read_one_char);
397 	close(iter_fd);
398 
399 out:
400 	bpf_iter_test_kern1__destroy(skel);
401 }
402 
403 static int do_read(const char *path, const char *expected)
404 {
405 	int err, iter_fd;
406 
407 	iter_fd = open(path, O_RDONLY);
408 	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
409 		  path, strerror(errno)))
410 		return -1;
411 
412 	err = do_read_with_fd(iter_fd, expected, false);
413 	close(iter_fd);
414 	return err;
415 }
416 
/* Pin an iterator link in bpffs, read it through the pinned path, then
 * swap the underlying program with bpf_link__update_program() and verify
 * the pinned path's output changes accordingly ("abcd" -> "ABCD").
 */
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists (left over from a previous run). */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator works.  Now do a link update of the
	 * underlying link and `cat` the iterator again; its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
469 
470 static void test_overflow(bool test_e2big_overflow, bool ret1)
471 {
472 	__u32 map_info_len, total_read_len, expected_read_len;
473 	int err, iter_fd, map1_fd, map2_fd, len;
474 	struct bpf_map_info map_info = {};
475 	struct bpf_iter_test_kern4 *skel;
476 	struct bpf_link *link;
477 	__u32 iter_size;
478 	char *buf;
479 
480 	skel = bpf_iter_test_kern4__open();
481 	if (CHECK(!skel, "bpf_iter_test_kern4__open",
482 		  "skeleton open failed\n"))
483 		return;
484 
485 	/* create two maps: bpf program will only do bpf_seq_write
486 	 * for these two maps. The goal is one map output almost
487 	 * fills seq_file buffer and then the other will trigger
488 	 * overflow and needs restart.
489 	 */
490 	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
491 	if (CHECK(map1_fd < 0, "bpf_map_create",
492 		  "map_creation failed: %s\n", strerror(errno)))
493 		goto out;
494 	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
495 	if (CHECK(map2_fd < 0, "bpf_map_create",
496 		  "map_creation failed: %s\n", strerror(errno)))
497 		goto free_map1;
498 
499 	/* bpf_seq_printf kernel buffer is 8 pages, so one map
500 	 * bpf_seq_write will mostly fill it, and the other map
501 	 * will partially fill and then trigger overflow and need
502 	 * bpf_seq_read restart.
503 	 */
504 	iter_size = sysconf(_SC_PAGE_SIZE) << 3;
505 
506 	if (test_e2big_overflow) {
507 		skel->rodata->print_len = (iter_size + 8) / 8;
508 		expected_read_len = 2 * (iter_size + 8);
509 	} else if (!ret1) {
510 		skel->rodata->print_len = (iter_size - 8) / 8;
511 		expected_read_len = 2 * (iter_size - 8);
512 	} else {
513 		skel->rodata->print_len = 1;
514 		expected_read_len = 2 * 8;
515 	}
516 	skel->rodata->ret1 = ret1;
517 
518 	if (CHECK(bpf_iter_test_kern4__load(skel),
519 		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
520 		goto free_map2;
521 
522 	/* setup filtering map_id in bpf program */
523 	map_info_len = sizeof(map_info);
524 	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
525 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
526 		  strerror(errno)))
527 		goto free_map2;
528 	skel->bss->map1_id = map_info.id;
529 
530 	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
531 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
532 		  strerror(errno)))
533 		goto free_map2;
534 	skel->bss->map2_id = map_info.id;
535 
536 	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
537 	if (!ASSERT_OK_PTR(link, "attach_iter"))
538 		goto free_map2;
539 
540 	iter_fd = bpf_iter_create(bpf_link__fd(link));
541 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
542 		goto free_link;
543 
544 	buf = malloc(expected_read_len);
545 	if (!buf)
546 		goto close_iter;
547 
548 	/* do read */
549 	total_read_len = 0;
550 	if (test_e2big_overflow) {
551 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
552 			total_read_len += len;
553 
554 		CHECK(len != -1 || errno != E2BIG, "read",
555 		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
556 			  len, strerror(errno));
557 		goto free_buf;
558 	} else if (!ret1) {
559 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
560 			total_read_len += len;
561 
562 		if (CHECK(len < 0, "read", "read failed: %s\n",
563 			  strerror(errno)))
564 			goto free_buf;
565 	} else {
566 		do {
567 			len = read(iter_fd, buf, expected_read_len);
568 			if (len > 0)
569 				total_read_len += len;
570 		} while (len > 0 || len == -EAGAIN);
571 
572 		if (CHECK(len < 0, "read", "read failed: %s\n",
573 			  strerror(errno)))
574 			goto free_buf;
575 	}
576 
577 	if (CHECK(total_read_len != expected_read_len, "read",
578 		  "total len %u, expected len %u\n", total_read_len,
579 		  expected_read_len))
580 		goto free_buf;
581 
582 	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
583 		  "expected 1 actual %d\n", skel->bss->map1_accessed))
584 		goto free_buf;
585 
586 	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
587 		  "expected 2 actual %d\n", skel->bss->map2_accessed))
588 		goto free_buf;
589 
590 	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
591 	      "map2_seqnum", "two different seqnum %lld %lld\n",
592 	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);
593 
594 free_buf:
595 	free(buf);
596 close_iter:
597 	close(iter_fd);
598 free_link:
599 	bpf_link__destroy(link);
600 free_map2:
601 	close(map2_fd);
602 free_map1:
603 	close(map1_fd);
604 out:
605 	bpf_iter_test_kern4__destroy(skel);
606 }
607 
608 static void test_bpf_hash_map(void)
609 {
610 	__u32 expected_key_a = 0, expected_key_b = 0;
611 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
612 	struct bpf_iter_bpf_hash_map *skel;
613 	int err, i, len, map_fd, iter_fd;
614 	union bpf_iter_link_info linfo;
615 	__u64 val, expected_val = 0;
616 	struct bpf_link *link;
617 	struct key_t {
618 		int a;
619 		int b;
620 		int c;
621 	} key;
622 	char buf[64];
623 
624 	skel = bpf_iter_bpf_hash_map__open();
625 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
626 		  "skeleton open failed\n"))
627 		return;
628 
629 	skel->bss->in_test_mode = true;
630 
631 	err = bpf_iter_bpf_hash_map__load(skel);
632 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
633 		  "skeleton load failed\n"))
634 		goto out;
635 
636 	/* iterator with hashmap2 and hashmap3 should fail */
637 	memset(&linfo, 0, sizeof(linfo));
638 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
639 	opts.link_info = &linfo;
640 	opts.link_info_len = sizeof(linfo);
641 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
642 	if (!ASSERT_ERR_PTR(link, "attach_iter"))
643 		goto out;
644 
645 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
646 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
647 	if (!ASSERT_ERR_PTR(link, "attach_iter"))
648 		goto out;
649 
650 	/* hashmap1 should be good, update map values here */
651 	map_fd = bpf_map__fd(skel->maps.hashmap1);
652 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
653 		key.a = i + 1;
654 		key.b = i + 2;
655 		key.c = i + 3;
656 		val = i + 4;
657 		expected_key_a += key.a;
658 		expected_key_b += key.b;
659 		expected_val += val;
660 
661 		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
662 		if (CHECK(err, "map_update", "map_update failed\n"))
663 			goto out;
664 	}
665 
666 	linfo.map.map_fd = map_fd;
667 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
668 	if (!ASSERT_OK_PTR(link, "attach_iter"))
669 		goto out;
670 
671 	iter_fd = bpf_iter_create(bpf_link__fd(link));
672 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
673 		goto free_link;
674 
675 	/* do some tests */
676 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
677 		;
678 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
679 		goto close_iter;
680 
681 	/* test results */
682 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
683 		  "key_sum_a", "got %u expected %u\n",
684 		  skel->bss->key_sum_a, expected_key_a))
685 		goto close_iter;
686 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
687 		  "key_sum_b", "got %u expected %u\n",
688 		  skel->bss->key_sum_b, expected_key_b))
689 		goto close_iter;
690 	if (CHECK(skel->bss->val_sum != expected_val,
691 		  "val_sum", "got %llu expected %llu\n",
692 		  skel->bss->val_sum, expected_val))
693 		goto close_iter;
694 
695 close_iter:
696 	close(iter_fd);
697 free_link:
698 	bpf_link__destroy(link);
699 out:
700 	bpf_iter_bpf_hash_map__destroy(skel);
701 }
702 
703 static void test_bpf_percpu_hash_map(void)
704 {
705 	__u32 expected_key_a = 0, expected_key_b = 0;
706 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
707 	struct bpf_iter_bpf_percpu_hash_map *skel;
708 	int err, i, j, len, map_fd, iter_fd;
709 	union bpf_iter_link_info linfo;
710 	__u32 expected_val = 0;
711 	struct bpf_link *link;
712 	struct key_t {
713 		int a;
714 		int b;
715 		int c;
716 	} key;
717 	char buf[64];
718 	void *val;
719 
720 	skel = bpf_iter_bpf_percpu_hash_map__open();
721 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
722 		  "skeleton open failed\n"))
723 		return;
724 
725 	skel->rodata->num_cpus = bpf_num_possible_cpus();
726 	val = malloc(8 * bpf_num_possible_cpus());
727 
728 	err = bpf_iter_bpf_percpu_hash_map__load(skel);
729 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
730 		  "skeleton load failed\n"))
731 		goto out;
732 
733 	/* update map values here */
734 	map_fd = bpf_map__fd(skel->maps.hashmap1);
735 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
736 		key.a = i + 1;
737 		key.b = i + 2;
738 		key.c = i + 3;
739 		expected_key_a += key.a;
740 		expected_key_b += key.b;
741 
742 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
743 			*(__u32 *)(val + j * 8) = i + j;
744 			expected_val += i + j;
745 		}
746 
747 		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
748 		if (CHECK(err, "map_update", "map_update failed\n"))
749 			goto out;
750 	}
751 
752 	memset(&linfo, 0, sizeof(linfo));
753 	linfo.map.map_fd = map_fd;
754 	opts.link_info = &linfo;
755 	opts.link_info_len = sizeof(linfo);
756 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
757 	if (!ASSERT_OK_PTR(link, "attach_iter"))
758 		goto out;
759 
760 	iter_fd = bpf_iter_create(bpf_link__fd(link));
761 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
762 		goto free_link;
763 
764 	/* do some tests */
765 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
766 		;
767 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
768 		goto close_iter;
769 
770 	/* test results */
771 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
772 		  "key_sum_a", "got %u expected %u\n",
773 		  skel->bss->key_sum_a, expected_key_a))
774 		goto close_iter;
775 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
776 		  "key_sum_b", "got %u expected %u\n",
777 		  skel->bss->key_sum_b, expected_key_b))
778 		goto close_iter;
779 	if (CHECK(skel->bss->val_sum != expected_val,
780 		  "val_sum", "got %u expected %u\n",
781 		  skel->bss->val_sum, expected_val))
782 		goto close_iter;
783 
784 close_iter:
785 	close(iter_fd);
786 free_link:
787 	bpf_link__destroy(link);
788 out:
789 	bpf_iter_bpf_percpu_hash_map__destroy(skel);
790 	free(val);
791 }
792 
793 static void test_bpf_array_map(void)
794 {
795 	__u64 val, expected_val = 0, res_first_val, first_val = 0;
796 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
797 	__u32 expected_key = 0, res_first_key;
798 	struct bpf_iter_bpf_array_map *skel;
799 	union bpf_iter_link_info linfo;
800 	int err, i, map_fd, iter_fd;
801 	struct bpf_link *link;
802 	char buf[64] = {};
803 	int len, start;
804 
805 	skel = bpf_iter_bpf_array_map__open_and_load();
806 	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
807 		  "skeleton open_and_load failed\n"))
808 		return;
809 
810 	map_fd = bpf_map__fd(skel->maps.arraymap1);
811 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
812 		val = i + 4;
813 		expected_key += i;
814 		expected_val += val;
815 
816 		if (i == 0)
817 			first_val = val;
818 
819 		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
820 		if (CHECK(err, "map_update", "map_update failed\n"))
821 			goto out;
822 	}
823 
824 	memset(&linfo, 0, sizeof(linfo));
825 	linfo.map.map_fd = map_fd;
826 	opts.link_info = &linfo;
827 	opts.link_info_len = sizeof(linfo);
828 	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
829 	if (!ASSERT_OK_PTR(link, "attach_iter"))
830 		goto out;
831 
832 	iter_fd = bpf_iter_create(bpf_link__fd(link));
833 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
834 		goto free_link;
835 
836 	/* do some tests */
837 	start = 0;
838 	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
839 		start += len;
840 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
841 		goto close_iter;
842 
843 	/* test results */
844 	res_first_key = *(__u32 *)buf;
845 	res_first_val = *(__u64 *)(buf + sizeof(__u32));
846 	if (CHECK(res_first_key != 0 || res_first_val != first_val,
847 		  "bpf_seq_write",
848 		  "seq_write failure: first key %u vs expected 0, "
849 		  " first value %llu vs expected %llu\n",
850 		  res_first_key, res_first_val, first_val))
851 		goto close_iter;
852 
853 	if (CHECK(skel->bss->key_sum != expected_key,
854 		  "key_sum", "got %u expected %u\n",
855 		  skel->bss->key_sum, expected_key))
856 		goto close_iter;
857 	if (CHECK(skel->bss->val_sum != expected_val,
858 		  "val_sum", "got %llu expected %llu\n",
859 		  skel->bss->val_sum, expected_val))
860 		goto close_iter;
861 
862 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
863 		err = bpf_map_lookup_elem(map_fd, &i, &val);
864 		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
865 			goto out;
866 		if (CHECK(i != val, "invalid_val",
867 			  "got value %llu expected %u\n", val, i))
868 			goto out;
869 	}
870 
871 close_iter:
872 	close(iter_fd);
873 free_link:
874 	bpf_link__destroy(link);
875 out:
876 	bpf_iter_bpf_array_map__destroy(skel);
877 }
878 
879 static void test_bpf_percpu_array_map(void)
880 {
881 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
882 	struct bpf_iter_bpf_percpu_array_map *skel;
883 	__u32 expected_key = 0, expected_val = 0;
884 	union bpf_iter_link_info linfo;
885 	int err, i, j, map_fd, iter_fd;
886 	struct bpf_link *link;
887 	char buf[64];
888 	void *val;
889 	int len;
890 
891 	skel = bpf_iter_bpf_percpu_array_map__open();
892 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
893 		  "skeleton open failed\n"))
894 		return;
895 
896 	skel->rodata->num_cpus = bpf_num_possible_cpus();
897 	val = malloc(8 * bpf_num_possible_cpus());
898 
899 	err = bpf_iter_bpf_percpu_array_map__load(skel);
900 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
901 		  "skeleton load failed\n"))
902 		goto out;
903 
904 	/* update map values here */
905 	map_fd = bpf_map__fd(skel->maps.arraymap1);
906 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
907 		expected_key += i;
908 
909 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
910 			*(__u32 *)(val + j * 8) = i + j;
911 			expected_val += i + j;
912 		}
913 
914 		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
915 		if (CHECK(err, "map_update", "map_update failed\n"))
916 			goto out;
917 	}
918 
919 	memset(&linfo, 0, sizeof(linfo));
920 	linfo.map.map_fd = map_fd;
921 	opts.link_info = &linfo;
922 	opts.link_info_len = sizeof(linfo);
923 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
924 	if (!ASSERT_OK_PTR(link, "attach_iter"))
925 		goto out;
926 
927 	iter_fd = bpf_iter_create(bpf_link__fd(link));
928 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
929 		goto free_link;
930 
931 	/* do some tests */
932 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
933 		;
934 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
935 		goto close_iter;
936 
937 	/* test results */
938 	if (CHECK(skel->bss->key_sum != expected_key,
939 		  "key_sum", "got %u expected %u\n",
940 		  skel->bss->key_sum, expected_key))
941 		goto close_iter;
942 	if (CHECK(skel->bss->val_sum != expected_val,
943 		  "val_sum", "got %u expected %u\n",
944 		  skel->bss->val_sum, expected_val))
945 		goto close_iter;
946 
947 close_iter:
948 	close(iter_fd);
949 free_link:
950 	bpf_link__destroy(link);
951 out:
952 	bpf_iter_bpf_percpu_array_map__destroy(skel);
953 	free(val);
954 }
955 
/* An iterator program deletes all local storage in a map.  Userspace
 * creates a socket, stores a value for it in sk_stg_map, runs the
 * delete_bpf_sk_storage_map iterator over that map, then verifies the
 * entry is gone (lookup must fail with ENOENT).
 */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;
	/* seed one storage entry for the socket */
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "map_update", "map_update failed\n"))
		goto out;

	/* attach the deleting iterator to exactly this storage map */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* drain the iterator so the BPF program visits every entry */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* the entry must have been deleted by the iterator program */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
1016 
1017 /* This creates a socket and its local storage. It then runs a task_iter BPF
1018  * program that replaces the existing socket local storage with the tgid of the
1019  * only task owning a file descriptor to this socket, this process, prog_tests.
1020  * It then runs a tcp socket iterator that negates the value in the existing
1021  * socket local storage, the test verifies that the resulting value is -pid.
1022  */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;

	/* listen() so the socket is visible to the tcp socket iterator */
	err = listen(sock_fd, 1);
	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	/* seed the storage with a sentinel before the programs rewrite it */
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
		goto close_socket;

	/* task iterator: replaces the storage with the owner's tgid */
	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	    getpid(), val, err))
		goto close_socket;

	/* tcp socket iterator: negates the stored value */
	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}
1068 
1069 static void test_bpf_sk_storage_map(void)
1070 {
1071 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1072 	int err, i, len, map_fd, iter_fd, num_sockets;
1073 	struct bpf_iter_bpf_sk_storage_map *skel;
1074 	union bpf_iter_link_info linfo;
1075 	int sock_fd[3] = {-1, -1, -1};
1076 	__u32 val, expected_val = 0;
1077 	struct bpf_link *link;
1078 	char buf[64];
1079 
1080 	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
1081 	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
1082 		  "skeleton open_and_load failed\n"))
1083 		return;
1084 
1085 	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1086 	num_sockets = ARRAY_SIZE(sock_fd);
1087 	for (i = 0; i < num_sockets; i++) {
1088 		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
1089 		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
1090 			goto out;
1091 
1092 		val = i + 1;
1093 		expected_val += val;
1094 
1095 		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
1096 					  BPF_NOEXIST);
1097 		if (CHECK(err, "map_update", "map_update failed\n"))
1098 			goto out;
1099 	}
1100 
1101 	memset(&linfo, 0, sizeof(linfo));
1102 	linfo.map.map_fd = map_fd;
1103 	opts.link_info = &linfo;
1104 	opts.link_info_len = sizeof(linfo);
1105 	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
1106 	if (!ASSERT_OK_PTR(link, "attach_iter"))
1107 		goto out;
1108 
1109 	iter_fd = bpf_iter_create(bpf_link__fd(link));
1110 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
1111 		goto free_link;
1112 
1113 	/* do some tests */
1114 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1115 		;
1116 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
1117 		goto close_iter;
1118 
1119 	/* test results */
1120 	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
1121 		  "ipv6_sk_count", "got %u expected %u\n",
1122 		  skel->bss->ipv6_sk_count, num_sockets))
1123 		goto close_iter;
1124 
1125 	if (CHECK(skel->bss->val_sum != expected_val,
1126 		  "val_sum", "got %u expected %u\n",
1127 		  skel->bss->val_sum, expected_val))
1128 		goto close_iter;
1129 
1130 close_iter:
1131 	close(iter_fd);
1132 free_link:
1133 	bpf_link__destroy(link);
1134 out:
1135 	for (i = 0; i < num_sockets; i++) {
1136 		if (sock_fd[i] >= 0)
1137 			close(sock_fd[i]);
1138 	}
1139 	bpf_iter_bpf_sk_storage_map__destroy(skel);
1140 }
1141 
1142 static void test_rdonly_buf_out_of_bound(void)
1143 {
1144 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1145 	struct bpf_iter_test_kern5 *skel;
1146 	union bpf_iter_link_info linfo;
1147 	struct bpf_link *link;
1148 
1149 	skel = bpf_iter_test_kern5__open_and_load();
1150 	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
1151 		  "skeleton open_and_load failed\n"))
1152 		return;
1153 
1154 	memset(&linfo, 0, sizeof(linfo));
1155 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
1156 	opts.link_info = &linfo;
1157 	opts.link_info_len = sizeof(linfo);
1158 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
1159 	if (!ASSERT_ERR_PTR(link, "attach_iter"))
1160 		bpf_link__destroy(link);
1161 
1162 	bpf_iter_test_kern5__destroy(skel);
1163 }
1164 
/* bpf_iter_test_kern6 accesses the seq buffer at a negative offset; the
 * verifier must reject it, so a successful load is a test failure.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}
1174 
1175 #define CMP_BUFFER_SIZE 1024
1176 static char task_vma_output[CMP_BUFFER_SIZE];
1177 static char proc_maps_output[CMP_BUFFER_SIZE];
1178 
/* Strip ' ' and '\t' from str in place and keep only the first line:
 * copying stops at the first '\n' (or the terminating NUL) and the result
 * is always NUL-terminated.  Safe on an empty string.
 */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	/* Test the terminator *before* consuming a character (while, not
	 * do/while): the old do/while copied the NUL of an empty string and
	 * then read one byte past it, and it copied a leading '\n' (plus
	 * everything after it) instead of truncating there.
	 */
	while (*src != '\0' && *src != '\n') {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);
	}

	*dst = '\0';
}
1194 
1195 #define min(a, b) ((a) < (b) ? (a) : (b))
1196 
1197 static void test_task_vma(void)
1198 {
1199 	int err, iter_fd = -1, proc_maps_fd = -1;
1200 	struct bpf_iter_task_vma *skel;
1201 	int len, read_size = 4;
1202 	char maps_path[64];
1203 
1204 	skel = bpf_iter_task_vma__open();
1205 	if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n"))
1206 		return;
1207 
1208 	skel->bss->pid = getpid();
1209 
1210 	err = bpf_iter_task_vma__load(skel);
1211 	if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n"))
1212 		goto out;
1213 
1214 	skel->links.proc_maps = bpf_program__attach_iter(
1215 		skel->progs.proc_maps, NULL);
1216 
1217 	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
1218 		skel->links.proc_maps = NULL;
1219 		goto out;
1220 	}
1221 
1222 	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
1223 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
1224 		goto out;
1225 
1226 	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
1227 	 * to trigger seq_file corner cases.
1228 	 */
1229 	len = 0;
1230 	while (len < CMP_BUFFER_SIZE) {
1231 		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
1232 					  min(read_size, CMP_BUFFER_SIZE - len));
1233 		if (!err)
1234 			break;
1235 		if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
1236 			goto out;
1237 		len += err;
1238 	}
1239 
1240 	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
1241 	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
1242 	proc_maps_fd = open(maps_path, O_RDONLY);
1243 	if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n"))
1244 		goto out;
1245 	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
1246 	if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n"))
1247 		goto out;
1248 
1249 	/* strip and compare the first line of the two files */
1250 	str_strip_first_line(task_vma_output);
1251 	str_strip_first_line(proc_maps_output);
1252 
1253 	CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output",
1254 	      "found mismatch\n");
1255 out:
1256 	close(proc_maps_fd);
1257 	close(iter_fd);
1258 	bpf_iter_task_vma__destroy(skel);
1259 }
1260 
1261 void test_bpf_iter(void)
1262 {
1263 	if (test__start_subtest("btf_id_or_null"))
1264 		test_btf_id_or_null();
1265 	if (test__start_subtest("ipv6_route"))
1266 		test_ipv6_route();
1267 	if (test__start_subtest("netlink"))
1268 		test_netlink();
1269 	if (test__start_subtest("bpf_map"))
1270 		test_bpf_map();
1271 	if (test__start_subtest("task"))
1272 		test_task();
1273 	if (test__start_subtest("task_sleepable"))
1274 		test_task_sleepable();
1275 	if (test__start_subtest("task_stack"))
1276 		test_task_stack();
1277 	if (test__start_subtest("task_file"))
1278 		test_task_file();
1279 	if (test__start_subtest("task_vma"))
1280 		test_task_vma();
1281 	if (test__start_subtest("task_btf"))
1282 		test_task_btf();
1283 	if (test__start_subtest("tcp4"))
1284 		test_tcp4();
1285 	if (test__start_subtest("tcp6"))
1286 		test_tcp6();
1287 	if (test__start_subtest("udp4"))
1288 		test_udp4();
1289 	if (test__start_subtest("udp6"))
1290 		test_udp6();
1291 	if (test__start_subtest("unix"))
1292 		test_unix();
1293 	if (test__start_subtest("anon"))
1294 		test_anon_iter(false);
1295 	if (test__start_subtest("anon-read-one-char"))
1296 		test_anon_iter(true);
1297 	if (test__start_subtest("file"))
1298 		test_file_iter();
1299 	if (test__start_subtest("overflow"))
1300 		test_overflow(false, false);
1301 	if (test__start_subtest("overflow-e2big"))
1302 		test_overflow(true, false);
1303 	if (test__start_subtest("prog-ret-1"))
1304 		test_overflow(false, true);
1305 	if (test__start_subtest("bpf_hash_map"))
1306 		test_bpf_hash_map();
1307 	if (test__start_subtest("bpf_percpu_hash_map"))
1308 		test_bpf_percpu_hash_map();
1309 	if (test__start_subtest("bpf_array_map"))
1310 		test_bpf_array_map();
1311 	if (test__start_subtest("bpf_percpu_array_map"))
1312 		test_bpf_percpu_array_map();
1313 	if (test__start_subtest("bpf_sk_storage_map"))
1314 		test_bpf_sk_storage_map();
1315 	if (test__start_subtest("bpf_sk_storage_delete"))
1316 		test_bpf_sk_storage_delete();
1317 	if (test__start_subtest("bpf_sk_storage_get"))
1318 		test_bpf_sk_storage_get();
1319 	if (test__start_subtest("rdonly-buf-out-of-bound"))
1320 		test_rdonly_buf_out_of_bound();
1321 	if (test__start_subtest("buf-neg-offset"))
1322 		test_buf_neg_offset();
1323 }
1324