// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

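/* "duration" is referenced by the CHECK() macro used throughout this file */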
static int duration;

static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

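/* Attach the given iterator program (with optional attach opts), create an
 * iterator fd from its link, and drain the iterator to make sure reading it
 * completes without error.
 */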
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* don't check contents, just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

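/* Verify that an already-created iterator fd keeps working even after its
 * link, skeleton, and underlying map fd have all been released.
 */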
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first if the map is freed */
	usleep(100);
	/* Memory used by both sock map and sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

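/* Read from fd into buf until size bytes are read, EOF is hit, or an error
 * occurs. Returns the total number of bytes read, or a negative value on
 * read error.
 */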
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

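/* do_nothing_wait() blocks on do_nothing_mutex, which the test holds while
 * iterating, so a known child thread stays alive for the task iterators.
 */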
static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

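/* Run the dump_task iterator with the given attach opts and report how many
 * iterated tasks matched the test's tid (num_known) vs. any other tid
 * (num_unknown).
 */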
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_task *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_task__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];

static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
	      "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd >= 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}


static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

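/* Exercise seq_file buffer overflow handling. With test_e2big_overflow, each
 * map's output alone exceeds the seq_file buffer and the read must fail with
 * E2BIG; with ret1, the BPF program returns 1 and reads are retried until
 * all data is read; otherwise, the two map outputs together overflow the
 * buffer once and the kernel must restart the interrupted bpf_seq_read.
 */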
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that one map's output almost
	 * fills the seq_file buffer and the other then triggers the
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it and then trigger the overflow and need a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		/* read() reports EAGAIN as -1 with errno, never as -EAGAIN */
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

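/* Populate hashmap1 with known keys and values, then check that a
 * map-element iterator over it computes the same key and value sums. Also
 * verify that attaching the iterator to hashmap2 or hashmap3 fails, and
 * that a sleepable program cannot be attached at all.
 */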
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed: %s\n", strerror(errno)))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	/* check the load result, not the skeleton pointer */
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed: %s\n", strerror(errno)))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	/* check the load result, not the skeleton pointer */
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, i.e. this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	    getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

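/* Dump this task's VMAs through a task_vma bpf_iter program in small chunks
 * (to exercise seq_file corner cases) and compare the first line against
 * /proc/pid/maps.
 */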
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

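/* Repeatedly create and drain task_vma iterators while a child keeps
 * spawning short-lived processes, so iterator reads race with tasks dying.
 */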
static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vma *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

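/* Use a vma iterator to translate trigger_func's virtual address into a
 * file offset and compare the result with get_uprobe_offset().
 */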
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}


static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}