// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
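
/* Note: the tests below share a common pattern: load the object with
 * bpf_prog_test_load(), fill the BPF_MAP_TYPE_PROG_ARRAY "jmp_table"
 * with classifier program fds via bpf_map_update_elem(), then run the
 * "entry" program with bpf_prog_test_run_opts() and assert on retval.
 */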

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

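	/* Fill every slot of the prog array with its classifier program. */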
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

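	/* Each run should tail call into slot i and return i; deleting the
	 * slot afterwards exercises the jmp->nop rewrite for that slot.
	 */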
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

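	/* With the map empty, the tail call falls through; 3 is the entry
	 * program's fallthrough return value.
	 */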
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

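	/* Refill the slots with the classifiers in reverse order, so slot i
	 * now holds classifier_(max - 1 - i); updating occupied slots covers
	 * the jmp->jmp rewrite.
	 */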
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

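	/* Deleting from an already-empty slot must fail with -ENOENT and
	 * must leave the fallthrough behaviour intact (the nop->nop case).
	 */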
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

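	/* The expected retvals below (2, 1, 3) correspond to the last program
	 * the chain reaches after each deletion, ending with the entry
	 * program's fallthrough value once slot 0 is gone.
	 */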
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

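	/* The classifier bumps a counter in .bss on every tail call; the
	 * expected value of 33 matches the kernel's tail call limit
	 * (MAX_TAIL_CALL_CNT in current kernels).
	 */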
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known in advance. The key is passed via
 * global data to select different targets whose return values we can
 * compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

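	/* The tail call key is supplied at run time through global data, so
	 * slot i should be selected and return i.
	 */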
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ across
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

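	/* The magic key values steer the entry program into branches that
	 * tail call with constant, but differing, indices 0, 1 and 2.
	 */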
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do a tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = buff,
		.data_size_in = sizeof(buff),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

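	/* Same limit as in test_tailcall_count: the counter must stop at the
	 * tail call limit (33) even with a bpf2bpf call before the tail call.
	 */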
	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

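	/* Expected retvals are multiples of the packet size accumulated
	 * along whatever call chain remains after each deletion.
	 */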
	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure that
 * the tail call counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31, because the tail call counter includes the first
 * two tail calls whereas the global counter is incremented only in the loop
 * shown in the flow above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure the
 * jump logic remains correct as instructions move.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

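	/* Reset the global counter and set the noise flag before the run. */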
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack which is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
}