1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_tcp4.skel.h"
11 #include "bpf_iter_tcp6.skel.h"
12 #include "bpf_iter_udp4.skel.h"
13 #include "bpf_iter_udp6.skel.h"
14 #include "bpf_iter_test_kern1.skel.h"
15 #include "bpf_iter_test_kern2.skel.h"
16 #include "bpf_iter_test_kern3.skel.h"
17 #include "bpf_iter_test_kern4.skel.h"
18 #include "bpf_iter_bpf_hash_map.skel.h"
19 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
20 #include "bpf_iter_bpf_array_map.skel.h"
21 #include "bpf_iter_bpf_percpu_array_map.skel.h"
22 #include "bpf_iter_bpf_sk_storage_map.skel.h"
23 #include "bpf_iter_test_kern5.skel.h"
24 #include "bpf_iter_test_kern6.skel.h"
25 
/* NOTE(review): not referenced directly in this file; the CHECK() macro
 * from test_progs.h expects a "duration" variable in scope — keep it.
 */
static int duration;
27 
/* The test_kern3 skeleton is expected to be rejected at load time
 * (bad btf_id handling); success is the failure case here.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		   "skeleton open_and_load unexpectedly succeeded\n"))
		return;

	/* only reached when the load unexpectedly succeeded */
	bpf_iter_test_kern3__destroy(skel);
}
39 
/* Attach @prog as an anonymous iterator, then drain its output into a
 * scratch buffer.  The contents are discarded; only a clean end of
 * stream (read() returning 0, not an error) is verified.
 */
static void do_dummy_read(struct bpf_program *prog)
{
	char buf[16] = {};
	struct bpf_link *link;
	int iter_fd;
	int nread;

	link = bpf_program__attach_iter(prog, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* not check contents, but ensure read() ends without error */
	do {
		nread = read(iter_fd, buf, sizeof(buf));
	} while (nread > 0);
	CHECK(nread < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}
64 
65 static void test_ipv6_route(void)
66 {
67 	struct bpf_iter_ipv6_route *skel;
68 
69 	skel = bpf_iter_ipv6_route__open_and_load();
70 	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
71 		  "skeleton open_and_load failed\n"))
72 		return;
73 
74 	do_dummy_read(skel->progs.dump_ipv6_route);
75 
76 	bpf_iter_ipv6_route__destroy(skel);
77 }
78 
79 static void test_netlink(void)
80 {
81 	struct bpf_iter_netlink *skel;
82 
83 	skel = bpf_iter_netlink__open_and_load();
84 	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
85 		  "skeleton open_and_load failed\n"))
86 		return;
87 
88 	do_dummy_read(skel->progs.dump_netlink);
89 
90 	bpf_iter_netlink__destroy(skel);
91 }
92 
93 static void test_bpf_map(void)
94 {
95 	struct bpf_iter_bpf_map *skel;
96 
97 	skel = bpf_iter_bpf_map__open_and_load();
98 	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
99 		  "skeleton open_and_load failed\n"))
100 		return;
101 
102 	do_dummy_read(skel->progs.dump_bpf_map);
103 
104 	bpf_iter_bpf_map__destroy(skel);
105 }
106 
107 static void test_task(void)
108 {
109 	struct bpf_iter_task *skel;
110 
111 	skel = bpf_iter_task__open_and_load();
112 	if (CHECK(!skel, "bpf_iter_task__open_and_load",
113 		  "skeleton open_and_load failed\n"))
114 		return;
115 
116 	do_dummy_read(skel->progs.dump_task);
117 
118 	bpf_iter_task__destroy(skel);
119 }
120 
121 static void test_task_stack(void)
122 {
123 	struct bpf_iter_task_stack *skel;
124 
125 	skel = bpf_iter_task_stack__open_and_load();
126 	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
127 		  "skeleton open_and_load failed\n"))
128 		return;
129 
130 	do_dummy_read(skel->progs.dump_task_stack);
131 
132 	bpf_iter_task_stack__destroy(skel);
133 }
134 
/* Thread start routine that does no work and hands its argument back as
 * the thread's exit value (returning from a start routine is equivalent
 * to pthread_exit() with that value).
 */
static void *do_nothing(void *arg)
{
	return arg;
}
139 
140 static void test_task_file(void)
141 {
142 	struct bpf_iter_task_file *skel;
143 	pthread_t thread_id;
144 	void *ret;
145 
146 	skel = bpf_iter_task_file__open_and_load();
147 	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
148 		  "skeleton open_and_load failed\n"))
149 		return;
150 
151 	skel->bss->tgid = getpid();
152 
153 	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
154 		  "pthread_create", "pthread_create failed\n"))
155 		goto done;
156 
157 	do_dummy_read(skel->progs.dump_task_file);
158 
159 	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
160 		  "pthread_join", "pthread_join failed\n"))
161 		goto done;
162 
163 	CHECK(skel->bss->count != 0, "check_count",
164 	      "invalid non pthread file visit count %d\n", skel->bss->count);
165 
166 done:
167 	bpf_iter_task_file__destroy(skel);
168 }
169 
170 static void test_tcp4(void)
171 {
172 	struct bpf_iter_tcp4 *skel;
173 
174 	skel = bpf_iter_tcp4__open_and_load();
175 	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
176 		  "skeleton open_and_load failed\n"))
177 		return;
178 
179 	do_dummy_read(skel->progs.dump_tcp4);
180 
181 	bpf_iter_tcp4__destroy(skel);
182 }
183 
184 static void test_tcp6(void)
185 {
186 	struct bpf_iter_tcp6 *skel;
187 
188 	skel = bpf_iter_tcp6__open_and_load();
189 	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
190 		  "skeleton open_and_load failed\n"))
191 		return;
192 
193 	do_dummy_read(skel->progs.dump_tcp6);
194 
195 	bpf_iter_tcp6__destroy(skel);
196 }
197 
198 static void test_udp4(void)
199 {
200 	struct bpf_iter_udp4 *skel;
201 
202 	skel = bpf_iter_udp4__open_and_load();
203 	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
204 		  "skeleton open_and_load failed\n"))
205 		return;
206 
207 	do_dummy_read(skel->progs.dump_udp4);
208 
209 	bpf_iter_udp4__destroy(skel);
210 }
211 
212 static void test_udp6(void)
213 {
214 	struct bpf_iter_udp6 *skel;
215 
216 	skel = bpf_iter_udp6__open_and_load();
217 	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
218 		  "skeleton open_and_load failed\n"))
219 		return;
220 
221 	do_dummy_read(skel->progs.dump_udp6);
222 
223 	bpf_iter_udp6__destroy(skel);
224 }
225 
/* Drain @iter_fd into a 16-byte buffer — one byte per read() call when
 * @read_one_char is set — and compare the accumulated output against
 * @expected (which must be shorter than 16 bytes, including the NUL).
 * Returns 0 on an exact match, -1 on read error, overflow, or mismatch.
 */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	char buf[16] = {};
	int start = 0;
	int read_buf_len = read_one_char ? 1 : 16;
	int len;

	for (;;) {
		len = read(iter_fd, buf + start, read_buf_len);
		if (len <= 0)
			break;
		start += len;
		/* the iterator must never produce 16 bytes or more */
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (CHECK(strcmp(buf, expected), "read",
		  "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}
251 
252 static void test_anon_iter(bool read_one_char)
253 {
254 	struct bpf_iter_test_kern1 *skel;
255 	struct bpf_link *link;
256 	int iter_fd, err;
257 
258 	skel = bpf_iter_test_kern1__open_and_load();
259 	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
260 		  "skeleton open_and_load failed\n"))
261 		return;
262 
263 	err = bpf_iter_test_kern1__attach(skel);
264 	if (CHECK(err, "bpf_iter_test_kern1__attach",
265 		  "skeleton attach failed\n")) {
266 		goto out;
267 	}
268 
269 	link = skel->links.dump_task;
270 	iter_fd = bpf_iter_create(bpf_link__fd(link));
271 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
272 		goto out;
273 
274 	do_read_with_fd(iter_fd, "abcd", read_one_char);
275 	close(iter_fd);
276 
277 out:
278 	bpf_iter_test_kern1__destroy(skel);
279 }
280 
281 static int do_read(const char *path, const char *expected)
282 {
283 	int err, iter_fd;
284 
285 	iter_fd = open(path, O_RDONLY);
286 	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
287 		  path, strerror(errno)))
288 		return -1;
289 
290 	err = do_read_with_fd(iter_fd, expected, false);
291 	close(iter_fd);
292 	return err;
293 }
294 
/* Pin an iterator link into bpffs, read it back through the file
 * system, then swap the link's underlying program and verify the pinned
 * file's output changes ("abcd" from kern1 vs "ABCD" from kern2).
 * Cleanup is a goto ladder: each label undoes one acquisition, in
 * reverse order.
 */
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
		goto out;

	/* unlink this path if it exists (leftover from a previous run). */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* file based iterator seems working fine. Let us do a link update
	 * of the underlying link and `cat` the iterator again, its content
	 * should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}
347 
348 static void test_overflow(bool test_e2big_overflow, bool ret1)
349 {
350 	__u32 map_info_len, total_read_len, expected_read_len;
351 	int err, iter_fd, map1_fd, map2_fd, len;
352 	struct bpf_map_info map_info = {};
353 	struct bpf_iter_test_kern4 *skel;
354 	struct bpf_link *link;
355 	__u32 page_size;
356 	char *buf;
357 
358 	skel = bpf_iter_test_kern4__open();
359 	if (CHECK(!skel, "bpf_iter_test_kern4__open",
360 		  "skeleton open failed\n"))
361 		return;
362 
363 	/* create two maps: bpf program will only do bpf_seq_write
364 	 * for these two maps. The goal is one map output almost
365 	 * fills seq_file buffer and then the other will trigger
366 	 * overflow and needs restart.
367 	 */
368 	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
369 	if (CHECK(map1_fd < 0, "bpf_create_map",
370 		  "map_creation failed: %s\n", strerror(errno)))
371 		goto out;
372 	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
373 	if (CHECK(map2_fd < 0, "bpf_create_map",
374 		  "map_creation failed: %s\n", strerror(errno)))
375 		goto free_map1;
376 
377 	/* bpf_seq_printf kernel buffer is one page, so one map
378 	 * bpf_seq_write will mostly fill it, and the other map
379 	 * will partially fill and then trigger overflow and need
380 	 * bpf_seq_read restart.
381 	 */
382 	page_size = sysconf(_SC_PAGE_SIZE);
383 
384 	if (test_e2big_overflow) {
385 		skel->rodata->print_len = (page_size + 8) / 8;
386 		expected_read_len = 2 * (page_size + 8);
387 	} else if (!ret1) {
388 		skel->rodata->print_len = (page_size - 8) / 8;
389 		expected_read_len = 2 * (page_size - 8);
390 	} else {
391 		skel->rodata->print_len = 1;
392 		expected_read_len = 2 * 8;
393 	}
394 	skel->rodata->ret1 = ret1;
395 
396 	if (CHECK(bpf_iter_test_kern4__load(skel),
397 		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
398 		goto free_map2;
399 
400 	/* setup filtering map_id in bpf program */
401 	map_info_len = sizeof(map_info);
402 	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
403 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
404 		  strerror(errno)))
405 		goto free_map2;
406 	skel->bss->map1_id = map_info.id;
407 
408 	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
409 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
410 		  strerror(errno)))
411 		goto free_map2;
412 	skel->bss->map2_id = map_info.id;
413 
414 	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
415 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
416 		goto free_map2;
417 
418 	iter_fd = bpf_iter_create(bpf_link__fd(link));
419 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
420 		goto free_link;
421 
422 	buf = malloc(expected_read_len);
423 	if (!buf)
424 		goto close_iter;
425 
426 	/* do read */
427 	total_read_len = 0;
428 	if (test_e2big_overflow) {
429 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
430 			total_read_len += len;
431 
432 		CHECK(len != -1 || errno != E2BIG, "read",
433 		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
434 			  len, strerror(errno));
435 		goto free_buf;
436 	} else if (!ret1) {
437 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
438 			total_read_len += len;
439 
440 		if (CHECK(len < 0, "read", "read failed: %s\n",
441 			  strerror(errno)))
442 			goto free_buf;
443 	} else {
444 		do {
445 			len = read(iter_fd, buf, expected_read_len);
446 			if (len > 0)
447 				total_read_len += len;
448 		} while (len > 0 || len == -EAGAIN);
449 
450 		if (CHECK(len < 0, "read", "read failed: %s\n",
451 			  strerror(errno)))
452 			goto free_buf;
453 	}
454 
455 	if (CHECK(total_read_len != expected_read_len, "read",
456 		  "total len %u, expected len %u\n", total_read_len,
457 		  expected_read_len))
458 		goto free_buf;
459 
460 	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
461 		  "expected 1 actual %d\n", skel->bss->map1_accessed))
462 		goto free_buf;
463 
464 	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
465 		  "expected 2 actual %d\n", skel->bss->map2_accessed))
466 		goto free_buf;
467 
468 	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
469 	      "map2_seqnum", "two different seqnum %lld %lld\n",
470 	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);
471 
472 free_buf:
473 	free(buf);
474 close_iter:
475 	close(iter_fd);
476 free_link:
477 	bpf_link__destroy(link);
478 free_map2:
479 	close(map2_fd);
480 free_map1:
481 	close(map1_fd);
482 out:
483 	bpf_iter_test_kern4__destroy(skel);
484 }
485 
486 static void test_bpf_hash_map(void)
487 {
488 	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
489 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
490 	struct bpf_iter_bpf_hash_map *skel;
491 	int err, i, len, map_fd, iter_fd;
492 	union bpf_iter_link_info linfo;
493 	__u64 val, expected_val = 0;
494 	struct bpf_link *link;
495 	struct key_t {
496 		int a;
497 		int b;
498 		int c;
499 	} key;
500 	char buf[64];
501 
502 	skel = bpf_iter_bpf_hash_map__open();
503 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
504 		  "skeleton open failed\n"))
505 		return;
506 
507 	skel->bss->in_test_mode = true;
508 
509 	err = bpf_iter_bpf_hash_map__load(skel);
510 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
511 		  "skeleton load failed\n"))
512 		goto out;
513 
514 	/* iterator with hashmap2 and hashmap3 should fail */
515 	memset(&linfo, 0, sizeof(linfo));
516 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
517 	opts.link_info = &linfo;
518 	opts.link_info_len = sizeof(linfo);
519 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
520 	if (CHECK(!IS_ERR(link), "attach_iter",
521 		  "attach_iter for hashmap2 unexpected succeeded\n"))
522 		goto out;
523 
524 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
525 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
526 	if (CHECK(!IS_ERR(link), "attach_iter",
527 		  "attach_iter for hashmap3 unexpected succeeded\n"))
528 		goto out;
529 
530 	/* hashmap1 should be good, update map values here */
531 	map_fd = bpf_map__fd(skel->maps.hashmap1);
532 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
533 		key.a = i + 1;
534 		key.b = i + 2;
535 		key.c = i + 3;
536 		val = i + 4;
537 		expected_key_a += key.a;
538 		expected_key_b += key.b;
539 		expected_key_c += key.c;
540 		expected_val += val;
541 
542 		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
543 		if (CHECK(err, "map_update", "map_update failed\n"))
544 			goto out;
545 	}
546 
547 	linfo.map.map_fd = map_fd;
548 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
549 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
550 		goto out;
551 
552 	iter_fd = bpf_iter_create(bpf_link__fd(link));
553 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
554 		goto free_link;
555 
556 	/* do some tests */
557 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
558 		;
559 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
560 		goto close_iter;
561 
562 	/* test results */
563 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
564 		  "key_sum_a", "got %u expected %u\n",
565 		  skel->bss->key_sum_a, expected_key_a))
566 		goto close_iter;
567 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
568 		  "key_sum_b", "got %u expected %u\n",
569 		  skel->bss->key_sum_b, expected_key_b))
570 		goto close_iter;
571 	if (CHECK(skel->bss->val_sum != expected_val,
572 		  "val_sum", "got %llu expected %llu\n",
573 		  skel->bss->val_sum, expected_val))
574 		goto close_iter;
575 
576 close_iter:
577 	close(iter_fd);
578 free_link:
579 	bpf_link__destroy(link);
580 out:
581 	bpf_iter_bpf_hash_map__destroy(skel);
582 }
583 
584 static void test_bpf_percpu_hash_map(void)
585 {
586 	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
587 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
588 	struct bpf_iter_bpf_percpu_hash_map *skel;
589 	int err, i, j, len, map_fd, iter_fd;
590 	union bpf_iter_link_info linfo;
591 	__u32 expected_val = 0;
592 	struct bpf_link *link;
593 	struct key_t {
594 		int a;
595 		int b;
596 		int c;
597 	} key;
598 	char buf[64];
599 	void *val;
600 
601 	val = malloc(8 * bpf_num_possible_cpus());
602 
603 	skel = bpf_iter_bpf_percpu_hash_map__open();
604 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
605 		  "skeleton open failed\n"))
606 		return;
607 
608 	skel->rodata->num_cpus = bpf_num_possible_cpus();
609 
610 	err = bpf_iter_bpf_percpu_hash_map__load(skel);
611 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
612 		  "skeleton load failed\n"))
613 		goto out;
614 
615 	/* update map values here */
616 	map_fd = bpf_map__fd(skel->maps.hashmap1);
617 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
618 		key.a = i + 1;
619 		key.b = i + 2;
620 		key.c = i + 3;
621 		expected_key_a += key.a;
622 		expected_key_b += key.b;
623 		expected_key_c += key.c;
624 
625 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
626 			*(__u32 *)(val + j * 8) = i + j;
627 			expected_val += i + j;
628 		}
629 
630 		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
631 		if (CHECK(err, "map_update", "map_update failed\n"))
632 			goto out;
633 	}
634 
635 	memset(&linfo, 0, sizeof(linfo));
636 	linfo.map.map_fd = map_fd;
637 	opts.link_info = &linfo;
638 	opts.link_info_len = sizeof(linfo);
639 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
640 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
641 		goto out;
642 
643 	iter_fd = bpf_iter_create(bpf_link__fd(link));
644 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
645 		goto free_link;
646 
647 	/* do some tests */
648 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
649 		;
650 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
651 		goto close_iter;
652 
653 	/* test results */
654 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
655 		  "key_sum_a", "got %u expected %u\n",
656 		  skel->bss->key_sum_a, expected_key_a))
657 		goto close_iter;
658 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
659 		  "key_sum_b", "got %u expected %u\n",
660 		  skel->bss->key_sum_b, expected_key_b))
661 		goto close_iter;
662 	if (CHECK(skel->bss->val_sum != expected_val,
663 		  "val_sum", "got %u expected %u\n",
664 		  skel->bss->val_sum, expected_val))
665 		goto close_iter;
666 
667 close_iter:
668 	close(iter_fd);
669 free_link:
670 	bpf_link__destroy(link);
671 out:
672 	bpf_iter_bpf_percpu_hash_map__destroy(skel);
673 }
674 
675 static void test_bpf_array_map(void)
676 {
677 	__u64 val, expected_val = 0, res_first_val, first_val = 0;
678 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
679 	__u32 expected_key = 0, res_first_key;
680 	struct bpf_iter_bpf_array_map *skel;
681 	union bpf_iter_link_info linfo;
682 	int err, i, map_fd, iter_fd;
683 	struct bpf_link *link;
684 	char buf[64] = {};
685 	int len, start;
686 
687 	skel = bpf_iter_bpf_array_map__open_and_load();
688 	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
689 		  "skeleton open_and_load failed\n"))
690 		return;
691 
692 	map_fd = bpf_map__fd(skel->maps.arraymap1);
693 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
694 		val = i + 4;
695 		expected_key += i;
696 		expected_val += val;
697 
698 		if (i == 0)
699 			first_val = val;
700 
701 		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
702 		if (CHECK(err, "map_update", "map_update failed\n"))
703 			goto out;
704 	}
705 
706 	memset(&linfo, 0, sizeof(linfo));
707 	linfo.map.map_fd = map_fd;
708 	opts.link_info = &linfo;
709 	opts.link_info_len = sizeof(linfo);
710 	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
711 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
712 		goto out;
713 
714 	iter_fd = bpf_iter_create(bpf_link__fd(link));
715 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
716 		goto free_link;
717 
718 	/* do some tests */
719 	start = 0;
720 	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
721 		start += len;
722 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
723 		goto close_iter;
724 
725 	/* test results */
726 	res_first_key = *(__u32 *)buf;
727 	res_first_val = *(__u64 *)(buf + sizeof(__u32));
728 	if (CHECK(res_first_key != 0 || res_first_val != first_val,
729 		  "bpf_seq_write",
730 		  "seq_write failure: first key %u vs expected 0, "
731 		  " first value %llu vs expected %llu\n",
732 		  res_first_key, res_first_val, first_val))
733 		goto close_iter;
734 
735 	if (CHECK(skel->bss->key_sum != expected_key,
736 		  "key_sum", "got %u expected %u\n",
737 		  skel->bss->key_sum, expected_key))
738 		goto close_iter;
739 	if (CHECK(skel->bss->val_sum != expected_val,
740 		  "val_sum", "got %llu expected %llu\n",
741 		  skel->bss->val_sum, expected_val))
742 		goto close_iter;
743 
744 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
745 		err = bpf_map_lookup_elem(map_fd, &i, &val);
746 		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
747 			goto out;
748 		if (CHECK(i != val, "invalid_val",
749 			  "got value %llu expected %u\n", val, i))
750 			goto out;
751 	}
752 
753 close_iter:
754 	close(iter_fd);
755 free_link:
756 	bpf_link__destroy(link);
757 out:
758 	bpf_iter_bpf_array_map__destroy(skel);
759 }
760 
761 static void test_bpf_percpu_array_map(void)
762 {
763 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
764 	struct bpf_iter_bpf_percpu_array_map *skel;
765 	__u32 expected_key = 0, expected_val = 0;
766 	union bpf_iter_link_info linfo;
767 	int err, i, j, map_fd, iter_fd;
768 	struct bpf_link *link;
769 	char buf[64];
770 	void *val;
771 	int len;
772 
773 	val = malloc(8 * bpf_num_possible_cpus());
774 
775 	skel = bpf_iter_bpf_percpu_array_map__open();
776 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
777 		  "skeleton open failed\n"))
778 		return;
779 
780 	skel->rodata->num_cpus = bpf_num_possible_cpus();
781 
782 	err = bpf_iter_bpf_percpu_array_map__load(skel);
783 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
784 		  "skeleton load failed\n"))
785 		goto out;
786 
787 	/* update map values here */
788 	map_fd = bpf_map__fd(skel->maps.arraymap1);
789 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
790 		expected_key += i;
791 
792 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
793 			*(__u32 *)(val + j * 8) = i + j;
794 			expected_val += i + j;
795 		}
796 
797 		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
798 		if (CHECK(err, "map_update", "map_update failed\n"))
799 			goto out;
800 	}
801 
802 	memset(&linfo, 0, sizeof(linfo));
803 	linfo.map.map_fd = map_fd;
804 	opts.link_info = &linfo;
805 	opts.link_info_len = sizeof(linfo);
806 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
807 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
808 		goto out;
809 
810 	iter_fd = bpf_iter_create(bpf_link__fd(link));
811 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
812 		goto free_link;
813 
814 	/* do some tests */
815 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
816 		;
817 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
818 		goto close_iter;
819 
820 	/* test results */
821 	if (CHECK(skel->bss->key_sum != expected_key,
822 		  "key_sum", "got %u expected %u\n",
823 		  skel->bss->key_sum, expected_key))
824 		goto close_iter;
825 	if (CHECK(skel->bss->val_sum != expected_val,
826 		  "val_sum", "got %u expected %u\n",
827 		  skel->bss->val_sum, expected_val))
828 		goto close_iter;
829 
830 close_iter:
831 	close(iter_fd);
832 free_link:
833 	bpf_link__destroy(link);
834 out:
835 	bpf_iter_bpf_percpu_array_map__destroy(skel);
836 }
837 
838 static void test_bpf_sk_storage_map(void)
839 {
840 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
841 	int err, i, len, map_fd, iter_fd, num_sockets;
842 	struct bpf_iter_bpf_sk_storage_map *skel;
843 	union bpf_iter_link_info linfo;
844 	int sock_fd[3] = {-1, -1, -1};
845 	__u32 val, expected_val = 0;
846 	struct bpf_link *link;
847 	char buf[64];
848 
849 	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
850 	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
851 		  "skeleton open_and_load failed\n"))
852 		return;
853 
854 	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
855 	num_sockets = ARRAY_SIZE(sock_fd);
856 	for (i = 0; i < num_sockets; i++) {
857 		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
858 		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
859 			goto out;
860 
861 		val = i + 1;
862 		expected_val += val;
863 
864 		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
865 					  BPF_NOEXIST);
866 		if (CHECK(err, "map_update", "map_update failed\n"))
867 			goto out;
868 	}
869 
870 	memset(&linfo, 0, sizeof(linfo));
871 	linfo.map.map_fd = map_fd;
872 	opts.link_info = &linfo;
873 	opts.link_info_len = sizeof(linfo);
874 	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
875 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
876 		goto out;
877 
878 	iter_fd = bpf_iter_create(bpf_link__fd(link));
879 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
880 		goto free_link;
881 
882 	/* do some tests */
883 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
884 		;
885 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
886 		goto close_iter;
887 
888 	/* test results */
889 	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
890 		  "ipv6_sk_count", "got %u expected %u\n",
891 		  skel->bss->ipv6_sk_count, num_sockets))
892 		goto close_iter;
893 
894 	if (CHECK(skel->bss->val_sum != expected_val,
895 		  "val_sum", "got %u expected %u\n",
896 		  skel->bss->val_sum, expected_val))
897 		goto close_iter;
898 
899 close_iter:
900 	close(iter_fd);
901 free_link:
902 	bpf_link__destroy(link);
903 out:
904 	for (i = 0; i < num_sockets; i++) {
905 		if (sock_fd[i] >= 0)
906 			close(sock_fd[i]);
907 	}
908 	bpf_iter_bpf_sk_storage_map__destroy(skel);
909 }
910 
911 static void test_rdonly_buf_out_of_bound(void)
912 {
913 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
914 	struct bpf_iter_test_kern5 *skel;
915 	union bpf_iter_link_info linfo;
916 	struct bpf_link *link;
917 
918 	skel = bpf_iter_test_kern5__open_and_load();
919 	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
920 		  "skeleton open_and_load failed\n"))
921 		return;
922 
923 	memset(&linfo, 0, sizeof(linfo));
924 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
925 	opts.link_info = &linfo;
926 	opts.link_info_len = sizeof(linfo);
927 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
928 	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
929 		bpf_link__destroy(link);
930 
931 	bpf_iter_test_kern5__destroy(skel);
932 }
933 
/* test_kern6 must fail to load (negative buffer offset); a successful
 * load is the failure case.
 */
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}
943 
/* Test-suite entry point: register each bpf_iter subtest with the
 * test_progs framework and run those selected by the user's filter.
 */
void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}
995