1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <test_progs.h>
4 #include "bpf_iter_ipv6_route.skel.h"
5 #include "bpf_iter_netlink.skel.h"
6 #include "bpf_iter_bpf_map.skel.h"
7 #include "bpf_iter_task.skel.h"
8 #include "bpf_iter_task_stack.skel.h"
9 #include "bpf_iter_task_file.skel.h"
10 #include "bpf_iter_task_btf.skel.h"
11 #include "bpf_iter_tcp4.skel.h"
12 #include "bpf_iter_tcp6.skel.h"
13 #include "bpf_iter_udp4.skel.h"
14 #include "bpf_iter_udp6.skel.h"
15 #include "bpf_iter_test_kern1.skel.h"
16 #include "bpf_iter_test_kern2.skel.h"
17 #include "bpf_iter_test_kern3.skel.h"
18 #include "bpf_iter_test_kern4.skel.h"
19 #include "bpf_iter_bpf_hash_map.skel.h"
20 #include "bpf_iter_bpf_percpu_hash_map.skel.h"
21 #include "bpf_iter_bpf_array_map.skel.h"
22 #include "bpf_iter_bpf_percpu_array_map.skel.h"
23 #include "bpf_iter_bpf_sk_storage_helpers.skel.h"
24 #include "bpf_iter_bpf_sk_storage_map.skel.h"
25 #include "bpf_iter_test_kern5.skel.h"
26 #include "bpf_iter_test_kern6.skel.h"
27 
/* Required by the CHECK() macro from test_progs.h. */
static int duration;
29 
/* bpf_iter_test_kern3 is expected to be rejected by the kernel, so
 * open_and_load must fail; a non-NULL skeleton is a test failure.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel = bpf_iter_test_kern3__open_and_load();

	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n"))
		bpf_iter_test_kern3__destroy(skel);
}
41 
42 static void do_dummy_read(struct bpf_program *prog)
43 {
44 	struct bpf_link *link;
45 	char buf[16] = {};
46 	int iter_fd, len;
47 
48 	link = bpf_program__attach_iter(prog, NULL);
49 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
50 		return;
51 
52 	iter_fd = bpf_iter_create(bpf_link__fd(link));
53 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
54 		goto free_link;
55 
56 	/* not check contents, but ensure read() ends without error */
57 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
58 		;
59 	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));
60 
61 	close(iter_fd);
62 
63 free_link:
64 	bpf_link__destroy(link);
65 }
66 
67 static void test_ipv6_route(void)
68 {
69 	struct bpf_iter_ipv6_route *skel;
70 
71 	skel = bpf_iter_ipv6_route__open_and_load();
72 	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
73 		  "skeleton open_and_load failed\n"))
74 		return;
75 
76 	do_dummy_read(skel->progs.dump_ipv6_route);
77 
78 	bpf_iter_ipv6_route__destroy(skel);
79 }
80 
81 static void test_netlink(void)
82 {
83 	struct bpf_iter_netlink *skel;
84 
85 	skel = bpf_iter_netlink__open_and_load();
86 	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
87 		  "skeleton open_and_load failed\n"))
88 		return;
89 
90 	do_dummy_read(skel->progs.dump_netlink);
91 
92 	bpf_iter_netlink__destroy(skel);
93 }
94 
95 static void test_bpf_map(void)
96 {
97 	struct bpf_iter_bpf_map *skel;
98 
99 	skel = bpf_iter_bpf_map__open_and_load();
100 	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
101 		  "skeleton open_and_load failed\n"))
102 		return;
103 
104 	do_dummy_read(skel->progs.dump_bpf_map);
105 
106 	bpf_iter_bpf_map__destroy(skel);
107 }
108 
109 static void test_task(void)
110 {
111 	struct bpf_iter_task *skel;
112 
113 	skel = bpf_iter_task__open_and_load();
114 	if (CHECK(!skel, "bpf_iter_task__open_and_load",
115 		  "skeleton open_and_load failed\n"))
116 		return;
117 
118 	do_dummy_read(skel->progs.dump_task);
119 
120 	bpf_iter_task__destroy(skel);
121 }
122 
123 static void test_task_stack(void)
124 {
125 	struct bpf_iter_task_stack *skel;
126 
127 	skel = bpf_iter_task_stack__open_and_load();
128 	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
129 		  "skeleton open_and_load failed\n"))
130 		return;
131 
132 	do_dummy_read(skel->progs.dump_task_stack);
133 
134 	bpf_iter_task_stack__destroy(skel);
135 }
136 
/* Thread start routine used by test_task_file(): terminates the
 * thread immediately, passing @arg through as its exit status.
 */
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}
141 
142 static void test_task_file(void)
143 {
144 	struct bpf_iter_task_file *skel;
145 	pthread_t thread_id;
146 	void *ret;
147 
148 	skel = bpf_iter_task_file__open_and_load();
149 	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
150 		  "skeleton open_and_load failed\n"))
151 		return;
152 
153 	skel->bss->tgid = getpid();
154 
155 	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
156 		  "pthread_create", "pthread_create failed\n"))
157 		goto done;
158 
159 	do_dummy_read(skel->progs.dump_task_file);
160 
161 	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
162 		  "pthread_join", "pthread_join failed\n"))
163 		goto done;
164 
165 	CHECK(skel->bss->count != 0, "check_count",
166 	      "invalid non pthread file visit count %d\n", skel->bss->count);
167 
168 done:
169 	bpf_iter_task_file__destroy(skel);
170 }
171 
/* Scratch buffer used by do_btf_read() to accumulate the BTF-typed
 * task_struct dump across multiple read() calls.
 */
#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];
175 
176 static int do_btf_read(struct bpf_iter_task_btf *skel)
177 {
178 	struct bpf_program *prog = skel->progs.dump_task_struct;
179 	struct bpf_iter_task_btf__bss *bss = skel->bss;
180 	int iter_fd = -1, len = 0, bufleft = TASKBUFSZ;
181 	struct bpf_link *link;
182 	char *buf = taskbuf;
183 	int ret = 0;
184 
185 	link = bpf_program__attach_iter(prog, NULL);
186 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
187 		return ret;
188 
189 	iter_fd = bpf_iter_create(bpf_link__fd(link));
190 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
191 		goto free_link;
192 
193 	do {
194 		len = read(iter_fd, buf, bufleft);
195 		if (len > 0) {
196 			buf += len;
197 			bufleft -= len;
198 		}
199 	} while (len > 0);
200 
201 	if (bss->skip) {
202 		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
203 		ret = 1;
204 		test__skip();
205 		goto free_link;
206 	}
207 
208 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
209 		goto free_link;
210 
211 	CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
212 	      "check for btf representation of task_struct in iter data",
213 	      "struct task_struct not found");
214 free_link:
215 	if (iter_fd > 0)
216 		close(iter_fd);
217 	bpf_link__destroy(link);
218 	return ret;
219 }
220 
221 static void test_task_btf(void)
222 {
223 	struct bpf_iter_task_btf__bss *bss;
224 	struct bpf_iter_task_btf *skel;
225 	int ret;
226 
227 	skel = bpf_iter_task_btf__open_and_load();
228 	if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
229 		  "skeleton open_and_load failed\n"))
230 		return;
231 
232 	bss = skel->bss;
233 
234 	ret = do_btf_read(skel);
235 	if (ret)
236 		goto cleanup;
237 
238 	if (CHECK(bss->tasks == 0, "check if iterated over tasks",
239 		  "no task iteration, did BPF program run?\n"))
240 		goto cleanup;
241 
242 	CHECK(bss->seq_err != 0, "check for unexpected err",
243 	      "bpf_seq_printf_btf returned %ld", bss->seq_err);
244 
245 cleanup:
246 	bpf_iter_task_btf__destroy(skel);
247 }
248 
249 static void test_tcp4(void)
250 {
251 	struct bpf_iter_tcp4 *skel;
252 
253 	skel = bpf_iter_tcp4__open_and_load();
254 	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
255 		  "skeleton open_and_load failed\n"))
256 		return;
257 
258 	do_dummy_read(skel->progs.dump_tcp4);
259 
260 	bpf_iter_tcp4__destroy(skel);
261 }
262 
263 static void test_tcp6(void)
264 {
265 	struct bpf_iter_tcp6 *skel;
266 
267 	skel = bpf_iter_tcp6__open_and_load();
268 	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
269 		  "skeleton open_and_load failed\n"))
270 		return;
271 
272 	do_dummy_read(skel->progs.dump_tcp6);
273 
274 	bpf_iter_tcp6__destroy(skel);
275 }
276 
277 static void test_udp4(void)
278 {
279 	struct bpf_iter_udp4 *skel;
280 
281 	skel = bpf_iter_udp4__open_and_load();
282 	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
283 		  "skeleton open_and_load failed\n"))
284 		return;
285 
286 	do_dummy_read(skel->progs.dump_udp4);
287 
288 	bpf_iter_udp4__destroy(skel);
289 }
290 
291 static void test_udp6(void)
292 {
293 	struct bpf_iter_udp6 *skel;
294 
295 	skel = bpf_iter_udp6__open_and_load();
296 	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
297 		  "skeleton open_and_load failed\n"))
298 		return;
299 
300 	do_dummy_read(skel->progs.dump_udp6);
301 
302 	bpf_iter_udp6__destroy(skel);
303 }
304 
305 /* The expected string is less than 16 bytes */
306 static int do_read_with_fd(int iter_fd, const char *expected,
307 			   bool read_one_char)
308 {
309 	int err = -1, len, read_buf_len, start;
310 	char buf[16] = {};
311 
312 	read_buf_len = read_one_char ? 1 : 16;
313 	start = 0;
314 	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
315 		start += len;
316 		if (CHECK(start >= 16, "read", "read len %d\n", len))
317 			return -1;
318 		read_buf_len = read_one_char ? 1 : 16 - start;
319 	}
320 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
321 		return -1;
322 
323 	err = strcmp(buf, expected);
324 	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
325 		  buf, expected))
326 		return -1;
327 
328 	return 0;
329 }
330 
331 static void test_anon_iter(bool read_one_char)
332 {
333 	struct bpf_iter_test_kern1 *skel;
334 	struct bpf_link *link;
335 	int iter_fd, err;
336 
337 	skel = bpf_iter_test_kern1__open_and_load();
338 	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
339 		  "skeleton open_and_load failed\n"))
340 		return;
341 
342 	err = bpf_iter_test_kern1__attach(skel);
343 	if (CHECK(err, "bpf_iter_test_kern1__attach",
344 		  "skeleton attach failed\n")) {
345 		goto out;
346 	}
347 
348 	link = skel->links.dump_task;
349 	iter_fd = bpf_iter_create(bpf_link__fd(link));
350 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
351 		goto out;
352 
353 	do_read_with_fd(iter_fd, "abcd", read_one_char);
354 	close(iter_fd);
355 
356 out:
357 	bpf_iter_test_kern1__destroy(skel);
358 }
359 
360 static int do_read(const char *path, const char *expected)
361 {
362 	int err, iter_fd;
363 
364 	iter_fd = open(path, O_RDONLY);
365 	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
366 		  path, strerror(errno)))
367 		return -1;
368 
369 	err = do_read_with_fd(iter_fd, expected, false);
370 	close(iter_fd);
371 	return err;
372 }
373 
374 static void test_file_iter(void)
375 {
376 	const char *path = "/sys/fs/bpf/bpf_iter_test1";
377 	struct bpf_iter_test_kern1 *skel1;
378 	struct bpf_iter_test_kern2 *skel2;
379 	struct bpf_link *link;
380 	int err;
381 
382 	skel1 = bpf_iter_test_kern1__open_and_load();
383 	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
384 		  "skeleton open_and_load failed\n"))
385 		return;
386 
387 	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
388 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
389 		goto out;
390 
391 	/* unlink this path if it exists. */
392 	unlink(path);
393 
394 	err = bpf_link__pin(link, path);
395 	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
396 		goto free_link;
397 
398 	err = do_read(path, "abcd");
399 	if (err)
400 		goto unlink_path;
401 
402 	/* file based iterator seems working fine. Let us a link update
403 	 * of the underlying link and `cat` the iterator again, its content
404 	 * should change.
405 	 */
406 	skel2 = bpf_iter_test_kern2__open_and_load();
407 	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
408 		  "skeleton open_and_load failed\n"))
409 		goto unlink_path;
410 
411 	err = bpf_link__update_program(link, skel2->progs.dump_task);
412 	if (CHECK(err, "update_prog", "update_prog failed\n"))
413 		goto destroy_skel2;
414 
415 	do_read(path, "ABCD");
416 
417 destroy_skel2:
418 	bpf_iter_test_kern2__destroy(skel2);
419 unlink_path:
420 	unlink(path);
421 free_link:
422 	bpf_link__destroy(link);
423 out:
424 	bpf_iter_test_kern1__destroy(skel1);
425 }
426 
427 static void test_overflow(bool test_e2big_overflow, bool ret1)
428 {
429 	__u32 map_info_len, total_read_len, expected_read_len;
430 	int err, iter_fd, map1_fd, map2_fd, len;
431 	struct bpf_map_info map_info = {};
432 	struct bpf_iter_test_kern4 *skel;
433 	struct bpf_link *link;
434 	__u32 iter_size;
435 	char *buf;
436 
437 	skel = bpf_iter_test_kern4__open();
438 	if (CHECK(!skel, "bpf_iter_test_kern4__open",
439 		  "skeleton open failed\n"))
440 		return;
441 
442 	/* create two maps: bpf program will only do bpf_seq_write
443 	 * for these two maps. The goal is one map output almost
444 	 * fills seq_file buffer and then the other will trigger
445 	 * overflow and needs restart.
446 	 */
447 	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
448 	if (CHECK(map1_fd < 0, "bpf_create_map",
449 		  "map_creation failed: %s\n", strerror(errno)))
450 		goto out;
451 	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
452 	if (CHECK(map2_fd < 0, "bpf_create_map",
453 		  "map_creation failed: %s\n", strerror(errno)))
454 		goto free_map1;
455 
456 	/* bpf_seq_printf kernel buffer is 8 pages, so one map
457 	 * bpf_seq_write will mostly fill it, and the other map
458 	 * will partially fill and then trigger overflow and need
459 	 * bpf_seq_read restart.
460 	 */
461 	iter_size = sysconf(_SC_PAGE_SIZE) << 3;
462 
463 	if (test_e2big_overflow) {
464 		skel->rodata->print_len = (iter_size + 8) / 8;
465 		expected_read_len = 2 * (iter_size + 8);
466 	} else if (!ret1) {
467 		skel->rodata->print_len = (iter_size - 8) / 8;
468 		expected_read_len = 2 * (iter_size - 8);
469 	} else {
470 		skel->rodata->print_len = 1;
471 		expected_read_len = 2 * 8;
472 	}
473 	skel->rodata->ret1 = ret1;
474 
475 	if (CHECK(bpf_iter_test_kern4__load(skel),
476 		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
477 		goto free_map2;
478 
479 	/* setup filtering map_id in bpf program */
480 	map_info_len = sizeof(map_info);
481 	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
482 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
483 		  strerror(errno)))
484 		goto free_map2;
485 	skel->bss->map1_id = map_info.id;
486 
487 	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
488 	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
489 		  strerror(errno)))
490 		goto free_map2;
491 	skel->bss->map2_id = map_info.id;
492 
493 	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
494 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
495 		goto free_map2;
496 
497 	iter_fd = bpf_iter_create(bpf_link__fd(link));
498 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
499 		goto free_link;
500 
501 	buf = malloc(expected_read_len);
502 	if (!buf)
503 		goto close_iter;
504 
505 	/* do read */
506 	total_read_len = 0;
507 	if (test_e2big_overflow) {
508 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
509 			total_read_len += len;
510 
511 		CHECK(len != -1 || errno != E2BIG, "read",
512 		      "expected ret -1, errno E2BIG, but get ret %d, error %s\n",
513 			  len, strerror(errno));
514 		goto free_buf;
515 	} else if (!ret1) {
516 		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
517 			total_read_len += len;
518 
519 		if (CHECK(len < 0, "read", "read failed: %s\n",
520 			  strerror(errno)))
521 			goto free_buf;
522 	} else {
523 		do {
524 			len = read(iter_fd, buf, expected_read_len);
525 			if (len > 0)
526 				total_read_len += len;
527 		} while (len > 0 || len == -EAGAIN);
528 
529 		if (CHECK(len < 0, "read", "read failed: %s\n",
530 			  strerror(errno)))
531 			goto free_buf;
532 	}
533 
534 	if (CHECK(total_read_len != expected_read_len, "read",
535 		  "total len %u, expected len %u\n", total_read_len,
536 		  expected_read_len))
537 		goto free_buf;
538 
539 	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
540 		  "expected 1 actual %d\n", skel->bss->map1_accessed))
541 		goto free_buf;
542 
543 	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
544 		  "expected 2 actual %d\n", skel->bss->map2_accessed))
545 		goto free_buf;
546 
547 	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
548 	      "map2_seqnum", "two different seqnum %lld %lld\n",
549 	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);
550 
551 free_buf:
552 	free(buf);
553 close_iter:
554 	close(iter_fd);
555 free_link:
556 	bpf_link__destroy(link);
557 free_map2:
558 	close(map2_fd);
559 free_map1:
560 	close(map1_fd);
561 out:
562 	bpf_iter_test_kern4__destroy(skel);
563 }
564 
565 static void test_bpf_hash_map(void)
566 {
567 	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
568 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
569 	struct bpf_iter_bpf_hash_map *skel;
570 	int err, i, len, map_fd, iter_fd;
571 	union bpf_iter_link_info linfo;
572 	__u64 val, expected_val = 0;
573 	struct bpf_link *link;
574 	struct key_t {
575 		int a;
576 		int b;
577 		int c;
578 	} key;
579 	char buf[64];
580 
581 	skel = bpf_iter_bpf_hash_map__open();
582 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
583 		  "skeleton open failed\n"))
584 		return;
585 
586 	skel->bss->in_test_mode = true;
587 
588 	err = bpf_iter_bpf_hash_map__load(skel);
589 	if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
590 		  "skeleton load failed\n"))
591 		goto out;
592 
593 	/* iterator with hashmap2 and hashmap3 should fail */
594 	memset(&linfo, 0, sizeof(linfo));
595 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
596 	opts.link_info = &linfo;
597 	opts.link_info_len = sizeof(linfo);
598 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
599 	if (CHECK(!IS_ERR(link), "attach_iter",
600 		  "attach_iter for hashmap2 unexpected succeeded\n"))
601 		goto out;
602 
603 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
604 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
605 	if (CHECK(!IS_ERR(link), "attach_iter",
606 		  "attach_iter for hashmap3 unexpected succeeded\n"))
607 		goto out;
608 
609 	/* hashmap1 should be good, update map values here */
610 	map_fd = bpf_map__fd(skel->maps.hashmap1);
611 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
612 		key.a = i + 1;
613 		key.b = i + 2;
614 		key.c = i + 3;
615 		val = i + 4;
616 		expected_key_a += key.a;
617 		expected_key_b += key.b;
618 		expected_key_c += key.c;
619 		expected_val += val;
620 
621 		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
622 		if (CHECK(err, "map_update", "map_update failed\n"))
623 			goto out;
624 	}
625 
626 	linfo.map.map_fd = map_fd;
627 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
628 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
629 		goto out;
630 
631 	iter_fd = bpf_iter_create(bpf_link__fd(link));
632 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
633 		goto free_link;
634 
635 	/* do some tests */
636 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
637 		;
638 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
639 		goto close_iter;
640 
641 	/* test results */
642 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
643 		  "key_sum_a", "got %u expected %u\n",
644 		  skel->bss->key_sum_a, expected_key_a))
645 		goto close_iter;
646 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
647 		  "key_sum_b", "got %u expected %u\n",
648 		  skel->bss->key_sum_b, expected_key_b))
649 		goto close_iter;
650 	if (CHECK(skel->bss->val_sum != expected_val,
651 		  "val_sum", "got %llu expected %llu\n",
652 		  skel->bss->val_sum, expected_val))
653 		goto close_iter;
654 
655 close_iter:
656 	close(iter_fd);
657 free_link:
658 	bpf_link__destroy(link);
659 out:
660 	bpf_iter_bpf_hash_map__destroy(skel);
661 }
662 
663 static void test_bpf_percpu_hash_map(void)
664 {
665 	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
666 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
667 	struct bpf_iter_bpf_percpu_hash_map *skel;
668 	int err, i, j, len, map_fd, iter_fd;
669 	union bpf_iter_link_info linfo;
670 	__u32 expected_val = 0;
671 	struct bpf_link *link;
672 	struct key_t {
673 		int a;
674 		int b;
675 		int c;
676 	} key;
677 	char buf[64];
678 	void *val;
679 
680 	val = malloc(8 * bpf_num_possible_cpus());
681 
682 	skel = bpf_iter_bpf_percpu_hash_map__open();
683 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
684 		  "skeleton open failed\n"))
685 		return;
686 
687 	skel->rodata->num_cpus = bpf_num_possible_cpus();
688 
689 	err = bpf_iter_bpf_percpu_hash_map__load(skel);
690 	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
691 		  "skeleton load failed\n"))
692 		goto out;
693 
694 	/* update map values here */
695 	map_fd = bpf_map__fd(skel->maps.hashmap1);
696 	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
697 		key.a = i + 1;
698 		key.b = i + 2;
699 		key.c = i + 3;
700 		expected_key_a += key.a;
701 		expected_key_b += key.b;
702 		expected_key_c += key.c;
703 
704 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
705 			*(__u32 *)(val + j * 8) = i + j;
706 			expected_val += i + j;
707 		}
708 
709 		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
710 		if (CHECK(err, "map_update", "map_update failed\n"))
711 			goto out;
712 	}
713 
714 	memset(&linfo, 0, sizeof(linfo));
715 	linfo.map.map_fd = map_fd;
716 	opts.link_info = &linfo;
717 	opts.link_info_len = sizeof(linfo);
718 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
719 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
720 		goto out;
721 
722 	iter_fd = bpf_iter_create(bpf_link__fd(link));
723 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
724 		goto free_link;
725 
726 	/* do some tests */
727 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
728 		;
729 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
730 		goto close_iter;
731 
732 	/* test results */
733 	if (CHECK(skel->bss->key_sum_a != expected_key_a,
734 		  "key_sum_a", "got %u expected %u\n",
735 		  skel->bss->key_sum_a, expected_key_a))
736 		goto close_iter;
737 	if (CHECK(skel->bss->key_sum_b != expected_key_b,
738 		  "key_sum_b", "got %u expected %u\n",
739 		  skel->bss->key_sum_b, expected_key_b))
740 		goto close_iter;
741 	if (CHECK(skel->bss->val_sum != expected_val,
742 		  "val_sum", "got %u expected %u\n",
743 		  skel->bss->val_sum, expected_val))
744 		goto close_iter;
745 
746 close_iter:
747 	close(iter_fd);
748 free_link:
749 	bpf_link__destroy(link);
750 out:
751 	bpf_iter_bpf_percpu_hash_map__destroy(skel);
752 }
753 
754 static void test_bpf_array_map(void)
755 {
756 	__u64 val, expected_val = 0, res_first_val, first_val = 0;
757 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
758 	__u32 expected_key = 0, res_first_key;
759 	struct bpf_iter_bpf_array_map *skel;
760 	union bpf_iter_link_info linfo;
761 	int err, i, map_fd, iter_fd;
762 	struct bpf_link *link;
763 	char buf[64] = {};
764 	int len, start;
765 
766 	skel = bpf_iter_bpf_array_map__open_and_load();
767 	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
768 		  "skeleton open_and_load failed\n"))
769 		return;
770 
771 	map_fd = bpf_map__fd(skel->maps.arraymap1);
772 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
773 		val = i + 4;
774 		expected_key += i;
775 		expected_val += val;
776 
777 		if (i == 0)
778 			first_val = val;
779 
780 		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
781 		if (CHECK(err, "map_update", "map_update failed\n"))
782 			goto out;
783 	}
784 
785 	memset(&linfo, 0, sizeof(linfo));
786 	linfo.map.map_fd = map_fd;
787 	opts.link_info = &linfo;
788 	opts.link_info_len = sizeof(linfo);
789 	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
790 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
791 		goto out;
792 
793 	iter_fd = bpf_iter_create(bpf_link__fd(link));
794 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
795 		goto free_link;
796 
797 	/* do some tests */
798 	start = 0;
799 	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
800 		start += len;
801 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
802 		goto close_iter;
803 
804 	/* test results */
805 	res_first_key = *(__u32 *)buf;
806 	res_first_val = *(__u64 *)(buf + sizeof(__u32));
807 	if (CHECK(res_first_key != 0 || res_first_val != first_val,
808 		  "bpf_seq_write",
809 		  "seq_write failure: first key %u vs expected 0, "
810 		  " first value %llu vs expected %llu\n",
811 		  res_first_key, res_first_val, first_val))
812 		goto close_iter;
813 
814 	if (CHECK(skel->bss->key_sum != expected_key,
815 		  "key_sum", "got %u expected %u\n",
816 		  skel->bss->key_sum, expected_key))
817 		goto close_iter;
818 	if (CHECK(skel->bss->val_sum != expected_val,
819 		  "val_sum", "got %llu expected %llu\n",
820 		  skel->bss->val_sum, expected_val))
821 		goto close_iter;
822 
823 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
824 		err = bpf_map_lookup_elem(map_fd, &i, &val);
825 		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
826 			goto out;
827 		if (CHECK(i != val, "invalid_val",
828 			  "got value %llu expected %u\n", val, i))
829 			goto out;
830 	}
831 
832 close_iter:
833 	close(iter_fd);
834 free_link:
835 	bpf_link__destroy(link);
836 out:
837 	bpf_iter_bpf_array_map__destroy(skel);
838 }
839 
840 static void test_bpf_percpu_array_map(void)
841 {
842 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
843 	struct bpf_iter_bpf_percpu_array_map *skel;
844 	__u32 expected_key = 0, expected_val = 0;
845 	union bpf_iter_link_info linfo;
846 	int err, i, j, map_fd, iter_fd;
847 	struct bpf_link *link;
848 	char buf[64];
849 	void *val;
850 	int len;
851 
852 	val = malloc(8 * bpf_num_possible_cpus());
853 
854 	skel = bpf_iter_bpf_percpu_array_map__open();
855 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
856 		  "skeleton open failed\n"))
857 		return;
858 
859 	skel->rodata->num_cpus = bpf_num_possible_cpus();
860 
861 	err = bpf_iter_bpf_percpu_array_map__load(skel);
862 	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
863 		  "skeleton load failed\n"))
864 		goto out;
865 
866 	/* update map values here */
867 	map_fd = bpf_map__fd(skel->maps.arraymap1);
868 	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
869 		expected_key += i;
870 
871 		for (j = 0; j < bpf_num_possible_cpus(); j++) {
872 			*(__u32 *)(val + j * 8) = i + j;
873 			expected_val += i + j;
874 		}
875 
876 		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
877 		if (CHECK(err, "map_update", "map_update failed\n"))
878 			goto out;
879 	}
880 
881 	memset(&linfo, 0, sizeof(linfo));
882 	linfo.map.map_fd = map_fd;
883 	opts.link_info = &linfo;
884 	opts.link_info_len = sizeof(linfo);
885 	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
886 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
887 		goto out;
888 
889 	iter_fd = bpf_iter_create(bpf_link__fd(link));
890 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
891 		goto free_link;
892 
893 	/* do some tests */
894 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
895 		;
896 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
897 		goto close_iter;
898 
899 	/* test results */
900 	if (CHECK(skel->bss->key_sum != expected_key,
901 		  "key_sum", "got %u expected %u\n",
902 		  skel->bss->key_sum, expected_key))
903 		goto close_iter;
904 	if (CHECK(skel->bss->val_sum != expected_val,
905 		  "val_sum", "got %u expected %u\n",
906 		  skel->bss->val_sum, expected_val))
907 		goto close_iter;
908 
909 close_iter:
910 	close(iter_fd);
911 free_link:
912 	bpf_link__destroy(link);
913 out:
914 	bpf_iter_bpf_percpu_array_map__destroy(skel);
915 }
916 
917 /* An iterator program deletes all local storage in a map. */
918 static void test_bpf_sk_storage_delete(void)
919 {
920 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
921 	struct bpf_iter_bpf_sk_storage_helpers *skel;
922 	union bpf_iter_link_info linfo;
923 	int err, len, map_fd, iter_fd;
924 	struct bpf_link *link;
925 	int sock_fd = -1;
926 	__u32 val = 42;
927 	char buf[64];
928 
929 	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
930 	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
931 		  "skeleton open_and_load failed\n"))
932 		return;
933 
934 	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
935 
936 	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
937 	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
938 		goto out;
939 	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
940 	if (CHECK(err, "map_update", "map_update failed\n"))
941 		goto out;
942 
943 	memset(&linfo, 0, sizeof(linfo));
944 	linfo.map.map_fd = map_fd;
945 	opts.link_info = &linfo;
946 	opts.link_info_len = sizeof(linfo);
947 	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
948 					&opts);
949 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
950 		goto out;
951 
952 	iter_fd = bpf_iter_create(bpf_link__fd(link));
953 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
954 		goto free_link;
955 
956 	/* do some tests */
957 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
958 		;
959 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
960 		goto close_iter;
961 
962 	/* test results */
963 	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
964 	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
965 		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
966 		goto close_iter;
967 
968 close_iter:
969 	close(iter_fd);
970 free_link:
971 	bpf_link__destroy(link);
972 out:
973 	if (sock_fd >= 0)
974 		close(sock_fd);
975 	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
976 }
977 
978 /* This creates a socket and its local storage. It then runs a task_iter BPF
979  * program that replaces the existing socket local storage with the tgid of the
980  * only task owning a file descriptor to this socket, this process, prog_tests.
981  * It then runs a tcp socket iterator that negates the value in the existing
982  * socket local storage, the test verifies that the resulting value is -pid.
983  */
984 static void test_bpf_sk_storage_get(void)
985 {
986 	struct bpf_iter_bpf_sk_storage_helpers *skel;
987 	int err, map_fd, val = -1;
988 	int sock_fd = -1;
989 
990 	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
991 	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
992 		  "skeleton open_and_load failed\n"))
993 		return;
994 
995 	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
996 	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
997 		goto out;
998 
999 	err = listen(sock_fd, 1);
1000 	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
1001 		goto close_socket;
1002 
1003 	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1004 
1005 	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
1006 	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
1007 		goto close_socket;
1008 
1009 	do_dummy_read(skel->progs.fill_socket_owner);
1010 
1011 	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
1012 	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
1013 	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
1014 	    getpid(), val, err))
1015 		goto close_socket;
1016 
1017 	do_dummy_read(skel->progs.negate_socket_local_storage);
1018 
1019 	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
1020 	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
1021 	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
1022 	      -getpid(), val, err);
1023 
1024 close_socket:
1025 	close(sock_fd);
1026 out:
1027 	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
1028 }
1029 
1030 static void test_bpf_sk_storage_map(void)
1031 {
1032 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1033 	int err, i, len, map_fd, iter_fd, num_sockets;
1034 	struct bpf_iter_bpf_sk_storage_map *skel;
1035 	union bpf_iter_link_info linfo;
1036 	int sock_fd[3] = {-1, -1, -1};
1037 	__u32 val, expected_val = 0;
1038 	struct bpf_link *link;
1039 	char buf[64];
1040 
1041 	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
1042 	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
1043 		  "skeleton open_and_load failed\n"))
1044 		return;
1045 
1046 	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
1047 	num_sockets = ARRAY_SIZE(sock_fd);
1048 	for (i = 0; i < num_sockets; i++) {
1049 		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
1050 		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
1051 			goto out;
1052 
1053 		val = i + 1;
1054 		expected_val += val;
1055 
1056 		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
1057 					  BPF_NOEXIST);
1058 		if (CHECK(err, "map_update", "map_update failed\n"))
1059 			goto out;
1060 	}
1061 
1062 	memset(&linfo, 0, sizeof(linfo));
1063 	linfo.map.map_fd = map_fd;
1064 	opts.link_info = &linfo;
1065 	opts.link_info_len = sizeof(linfo);
1066 	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
1067 	if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
1068 		goto out;
1069 
1070 	iter_fd = bpf_iter_create(bpf_link__fd(link));
1071 	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
1072 		goto free_link;
1073 
1074 	/* do some tests */
1075 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
1076 		;
1077 	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
1078 		goto close_iter;
1079 
1080 	/* test results */
1081 	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
1082 		  "ipv6_sk_count", "got %u expected %u\n",
1083 		  skel->bss->ipv6_sk_count, num_sockets))
1084 		goto close_iter;
1085 
1086 	if (CHECK(skel->bss->val_sum != expected_val,
1087 		  "val_sum", "got %u expected %u\n",
1088 		  skel->bss->val_sum, expected_val))
1089 		goto close_iter;
1090 
1091 close_iter:
1092 	close(iter_fd);
1093 free_link:
1094 	bpf_link__destroy(link);
1095 out:
1096 	for (i = 0; i < num_sockets; i++) {
1097 		if (sock_fd[i] >= 0)
1098 			close(sock_fd[i]);
1099 	}
1100 	bpf_iter_bpf_sk_storage_map__destroy(skel);
1101 }
1102 
1103 static void test_rdonly_buf_out_of_bound(void)
1104 {
1105 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
1106 	struct bpf_iter_test_kern5 *skel;
1107 	union bpf_iter_link_info linfo;
1108 	struct bpf_link *link;
1109 
1110 	skel = bpf_iter_test_kern5__open_and_load();
1111 	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
1112 		  "skeleton open_and_load failed\n"))
1113 		return;
1114 
1115 	memset(&linfo, 0, sizeof(linfo));
1116 	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
1117 	opts.link_info = &linfo;
1118 	opts.link_info_len = sizeof(linfo);
1119 	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
1120 	if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
1121 		bpf_link__destroy(link);
1122 
1123 	bpf_iter_test_kern5__destroy(skel);
1124 }
1125 
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel = bpf_iter_test_kern6__open_and_load();

	/* Loading must fail: the program accesses its buffer at a negative
	 * offset. A non-NULL skeleton means the verifier wrongly accepted
	 * the program; destroy it so nothing leaks.
	 */
	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}
1135 
1136 void test_bpf_iter(void)
1137 {
1138 	if (test__start_subtest("btf_id_or_null"))
1139 		test_btf_id_or_null();
1140 	if (test__start_subtest("ipv6_route"))
1141 		test_ipv6_route();
1142 	if (test__start_subtest("netlink"))
1143 		test_netlink();
1144 	if (test__start_subtest("bpf_map"))
1145 		test_bpf_map();
1146 	if (test__start_subtest("task"))
1147 		test_task();
1148 	if (test__start_subtest("task_stack"))
1149 		test_task_stack();
1150 	if (test__start_subtest("task_file"))
1151 		test_task_file();
1152 	if (test__start_subtest("task_btf"))
1153 		test_task_btf();
1154 	if (test__start_subtest("tcp4"))
1155 		test_tcp4();
1156 	if (test__start_subtest("tcp6"))
1157 		test_tcp6();
1158 	if (test__start_subtest("udp4"))
1159 		test_udp4();
1160 	if (test__start_subtest("udp6"))
1161 		test_udp6();
1162 	if (test__start_subtest("anon"))
1163 		test_anon_iter(false);
1164 	if (test__start_subtest("anon-read-one-char"))
1165 		test_anon_iter(true);
1166 	if (test__start_subtest("file"))
1167 		test_file_iter();
1168 	if (test__start_subtest("overflow"))
1169 		test_overflow(false, false);
1170 	if (test__start_subtest("overflow-e2big"))
1171 		test_overflow(true, false);
1172 	if (test__start_subtest("prog-ret-1"))
1173 		test_overflow(false, true);
1174 	if (test__start_subtest("bpf_hash_map"))
1175 		test_bpf_hash_map();
1176 	if (test__start_subtest("bpf_percpu_hash_map"))
1177 		test_bpf_percpu_hash_map();
1178 	if (test__start_subtest("bpf_array_map"))
1179 		test_bpf_array_map();
1180 	if (test__start_subtest("bpf_percpu_array_map"))
1181 		test_bpf_percpu_array_map();
1182 	if (test__start_subtest("bpf_sk_storage_map"))
1183 		test_bpf_sk_storage_map();
1184 	if (test__start_subtest("bpf_sk_storage_delete"))
1185 		test_bpf_sk_storage_delete();
1186 	if (test__start_subtest("bpf_sk_storage_get"))
1187 		test_bpf_sk_storage_get();
1188 	if (test__start_subtest("rdonly-buf-out-of-bound"))
1189 		test_rdonly_buf_out_of_bound();
1190 	if (test__start_subtest("buf-neg-offset"))
1191 		test_buf_neg_offset();
1192 }
1193