// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/err.h>
#include <netinet/tcp.h>
#include <test_progs.h>
#include "network_helpers.h"
#include "bpf_dctcp.skel.h"
#include "bpf_cubic.skel.h"
#include "bpf_tcp_nogpl.skel.h"
#include "tcp_ca_update.skel.h"
#include "bpf_dctcp_release.skel.h"
#include "tcp_ca_write_sk_pacing.skel.h"
#include "tcp_ca_incompl_cong_ops.skel.h"
#include "tcp_ca_unsupp_cong_op.skel.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

static const unsigned int total_bytes = 10 * 1024 * 1024;
static int expected_stg = 0xeB9F;
static int stop, duration;

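/* Switch the socket to the given congestion control algorithm via
 * setsockopt(TCP_CONGESTION).
 */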
static int settcpca(int fd, const char *tcp_ca)
{
	int err;

	err = setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, tcp_ca, strlen(tcp_ca));
	if (CHECK(err == -1, "setsockopt(fd, TCP_CONGESTION)", "errno:%d\n",
		  errno))
		return -1;

	return 0;
}

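/* Server thread: accept one connection on lfd and stream total_bytes to
 * it, stopping early on error or when the client sets "stop".
 */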
static void *server(void *arg)
{
	int lfd = (int)(long)arg, err = 0, fd;
	ssize_t nr_sent = 0, bytes = 0;
	char batch[1500];

	fd = accept(lfd, NULL, NULL);
	while (fd == -1) {
		if (errno == EINTR) {
			/* Retry accept() after it was interrupted by a signal. */
			fd = accept(lfd, NULL, NULL);
			continue;
		}
		err = -errno;
		goto done;
	}

	if (settimeo(fd, 0)) {
		err = -errno;
		goto done;
	}

	while (bytes < total_bytes && !READ_ONCE(stop)) {
		nr_sent = send(fd, &batch,
			       MIN(total_bytes - bytes, sizeof(batch)), 0);
		if (nr_sent == -1 && errno == EINTR)
			continue;
		if (nr_sent == -1) {
			err = -errno;
			break;
		}
		bytes += nr_sent;
	}

	CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
	      bytes, total_bytes, nr_sent, errno);

done:
	if (fd >= 0)
		close(fd);
	if (err) {
		WRITE_ONCE(stop, 1);
		return ERR_PTR(err);
	}
	return NULL;
}

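/* Create an AF_INET6 loopback connection with both sockets using the
 * given congestion control algorithm and transfer total_bytes from a
 * server thread to the client.  If sk_stg_map is given, an entry keyed
 * by the client fd is added before connect() and is expected to have
 * been deleted by the time connect() returns.
 */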
static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
{
	struct sockaddr_in6 sa6 = {};
	ssize_t nr_recv = 0, bytes = 0;
	int lfd = -1, fd = -1;
	pthread_t srv_thread;
	socklen_t addrlen = sizeof(sa6);
	void *thread_ret;
	char batch[1500];
	int err;

	WRITE_ONCE(stop, 0);

	lfd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(lfd == -1, "socket", "errno:%d\n", errno))
		return;
	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(fd == -1, "socket", "errno:%d\n", errno)) {
		close(lfd);
		return;
	}

	if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca) ||
	    settimeo(lfd, 0) || settimeo(fd, 0))
		goto done;

	/* bind, listen and start server thread to accept */
	sa6.sin6_family = AF_INET6;
	sa6.sin6_addr = in6addr_loopback;
	err = bind(lfd, (struct sockaddr *)&sa6, addrlen);
	if (CHECK(err == -1, "bind", "errno:%d\n", errno))
		goto done;
	err = getsockname(lfd, (struct sockaddr *)&sa6, &addrlen);
	if (CHECK(err == -1, "getsockname", "errno:%d\n", errno))
		goto done;
	err = listen(lfd, 1);
	if (CHECK(err == -1, "listen", "errno:%d\n", errno))
		goto done;

	if (sk_stg_map) {
		err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd,
					  &expected_stg, BPF_NOEXIST);
		if (CHECK(err, "bpf_map_update_elem(sk_stg_map)",
			  "err:%d errno:%d\n", err, errno))
			goto done;
	}

	/* connect to server */
	err = connect(fd, (struct sockaddr *)&sa6, addrlen);
	if (CHECK(err == -1, "connect", "errno:%d\n", errno))
		goto done;

	if (sk_stg_map) {
		int tmp_stg;

		err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd,
					  &tmp_stg);
		if (CHECK(!err || errno != ENOENT,
			  "bpf_map_lookup_elem(sk_stg_map)",
			  "err:%d errno:%d\n", err, errno))
			goto done;
	}

	err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
	if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
		goto done;

	/* recv total_bytes */
	while (bytes < total_bytes && !READ_ONCE(stop)) {
		nr_recv = recv(fd, &batch,
			       MIN(total_bytes - bytes, sizeof(batch)), 0);
		if (nr_recv == -1 && errno == EINTR)
			continue;
		if (nr_recv == -1)
			break;
		bytes += nr_recv;
	}

	CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
	      bytes, total_bytes, nr_recv, errno);

	WRITE_ONCE(stop, 1);
	pthread_join(srv_thread, &thread_ret);
	CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
	      PTR_ERR(thread_ret));
done:
	close(lfd);
	close(fd);
}

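/* Attach bpf_cubic as a struct_ops congestion control, run a data
 * transfer with it, and check that its pkts_acked callback ran.
 */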
static void test_cubic(void)
{
	struct bpf_cubic *cubic_skel;
	struct bpf_link *link;

	cubic_skel = bpf_cubic__open_and_load();
	if (CHECK(!cubic_skel, "bpf_cubic__open_and_load", "failed\n"))
		return;

	link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
		bpf_cubic__destroy(cubic_skel);
		return;
	}

	do_test("bpf_cubic", NULL);

	ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");

	bpf_link__destroy(link);
	bpf_cubic__destroy(cubic_skel);
}

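/* Attach bpf_dctcp, run a data transfer with it, and check that the
 * socket-storage value picked up by the BPF program matches expected_stg.
 */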
static void test_dctcp(void)
{
	struct bpf_dctcp *dctcp_skel;
	struct bpf_link *link;

	dctcp_skel = bpf_dctcp__open_and_load();
	if (CHECK(!dctcp_skel, "bpf_dctcp__open_and_load", "failed\n"))
		return;

	link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
		bpf_dctcp__destroy(dctcp_skel);
		return;
	}

	do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map);
	CHECK(dctcp_skel->bss->stg_result != expected_stg,
	      "Unexpected stg_result", "stg_result (%x) != expected_stg (%x)\n",
	      dctcp_skel->bss->stg_result, expected_stg);

	bpf_link__destroy(link);
	bpf_dctcp__destroy(dctcp_skel);
}

static char *err_str;
static bool found;

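/* libbpf print callback: pass most messages through, but for the program
 * load log emitted as a warning, check whether it contains err_str and
 * record the result in "found".
 */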
static int libbpf_debug_print(enum libbpf_print_level level,
			      const char *format, va_list args)
{
	const char *prog_name, *log_buf;

	if (level != LIBBPF_WARN ||
	    !strstr(format, "-- BEGIN PROG LOAD LOG --")) {
		vprintf(format, args);
		return 0;
	}

	prog_name = va_arg(args, char *);
	log_buf = va_arg(args, char *);
	if (!log_buf)
		goto out;
	if (err_str && strstr(log_buf, err_str) != NULL)
		found = true;
out:
	printf(format, prog_name, log_buf);
	return 0;
}

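/* Loading a struct_ops program without a GPL-compatible license is
 * expected to fail with the "GPL compatible license" verifier message.
 */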
static void test_invalid_license(void)
{
	libbpf_print_fn_t old_print_fn;
	struct bpf_tcp_nogpl *skel;

	err_str = "struct ops programs must have a GPL compatible license";
	found = false;
	old_print_fn = libbpf_set_print(libbpf_debug_print);

	skel = bpf_tcp_nogpl__open_and_load();
	ASSERT_NULL(skel, "bpf_tcp_nogpl");
	ASSERT_EQ(found, true, "expected_err_msg");

	bpf_tcp_nogpl__destroy(skel);
	libbpf_set_print(old_print_fn);
}

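/* Load bpf_dctcp with its "fallback" set to cubic: the server socket is
 * expected to end up using cubic, tcp_cdg_res should be -ENOTSUPP, and
 * the setsockopt() calls made from the re-entered init() should each
 * fail with -EBUSY (ebusy_cnt == 3).
 */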
static void test_dctcp_fallback(void)
{
	int err, lfd = -1, cli_fd = -1, srv_fd = -1;
	struct network_helper_opts opts = {
		.cc = "cubic",
	};
	struct bpf_dctcp *dctcp_skel;
	struct bpf_link *link = NULL;
	char srv_cc[16];
	socklen_t cc_len = sizeof(srv_cc);

	dctcp_skel = bpf_dctcp__open();
	if (!ASSERT_OK_PTR(dctcp_skel, "dctcp_skel"))
		return;
	strcpy(dctcp_skel->rodata->fallback, "cubic");
	if (!ASSERT_OK(bpf_dctcp__load(dctcp_skel), "bpf_dctcp__load"))
		goto done;

	link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);
	if (!ASSERT_OK_PTR(link, "dctcp link"))
		goto done;

	lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(lfd, 0, "lfd") ||
	    !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp"))
		goto done;

	cli_fd = connect_to_fd_opts(lfd, &opts);
	if (!ASSERT_GE(cli_fd, 0, "cli_fd"))
		goto done;

	srv_fd = accept(lfd, NULL, 0);
	if (!ASSERT_GE(srv_fd, 0, "srv_fd"))
		goto done;
	ASSERT_STREQ(dctcp_skel->bss->cc_res, "cubic", "cc_res");
	ASSERT_EQ(dctcp_skel->bss->tcp_cdg_res, -ENOTSUPP, "tcp_cdg_res");
	/* Every setsockopt(TCP_CONGESTION) issued from the recursively
	 * re-entered bpf_dctcp->init() should fail with -EBUSY.
	 */
	ASSERT_EQ(dctcp_skel->bss->ebusy_cnt, 3, "ebusy_cnt");

	err = getsockopt(srv_fd, SOL_TCP, TCP_CONGESTION, srv_cc, &cc_len);
	if (!ASSERT_OK(err, "getsockopt(srv_fd, TCP_CONGESTION)"))
		goto done;
	ASSERT_STREQ(srv_cc, "cubic", "srv_fd cc");

done:
	bpf_link__destroy(link);
	bpf_dctcp__destroy(dctcp_skel);
	if (lfd != -1)
		close(lfd);
	if (srv_fd != -1)
		close(srv_fd);
	if (cli_fd != -1)
		close(cli_fd);
}

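/* Loading bpf_dctcp_release is expected to fail with the
 * "unknown func bpf_setsockopt" message, i.e. bpf_setsockopt() is not
 * available to its .release callback.
 */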
static void test_rel_setsockopt(void)
{
	struct bpf_dctcp_release *rel_skel;
	libbpf_print_fn_t old_print_fn;

	err_str = "unknown func bpf_setsockopt";
	found = false;

	old_print_fn = libbpf_set_print(libbpf_debug_print);
	rel_skel = bpf_dctcp_release__open_and_load();
	libbpf_set_print(old_print_fn);

	ASSERT_ERR_PTR(rel_skel, "rel_skel");
	ASSERT_TRUE(found, "expected_err_msg");

	bpf_dctcp_release__destroy(rel_skel);
}

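/* tcp_ca_write_sk_pacing (a struct_ops prog that writes the socket's
 * pacing fields) should load and attach successfully.
 */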
static void test_write_sk_pacing(void)
{
	struct tcp_ca_write_sk_pacing *skel;
	struct bpf_link *link;

	skel = tcp_ca_write_sk_pacing__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	bpf_link__destroy(link);
	tcp_ca_write_sk_pacing__destroy(skel);
}

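/* An incomplete tcp_congestion_ops (see the comment below about the
 * missing callbacks) should load fine but fail to attach.
 */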
static void test_incompl_cong_ops(void)
{
	struct tcp_ca_incompl_cong_ops *skel;
	struct bpf_link *link;

	skel = tcp_ca_incompl_cong_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	/* The missing cong_avoid() and cong_control() callbacks are only
	 * reported at attach time:
	 */
	link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
	ASSERT_ERR_PTR(link, "attach_struct_ops");

	bpf_link__destroy(link);
	tcp_ca_incompl_cong_ops__destroy(skel);
}

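/* Implementing the unsupported get_info member should make the load
 * fail with the expected "attach to unsupported member" message.
 */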
static void test_unsupp_cong_op(void)
{
	libbpf_print_fn_t old_print_fn;
	struct tcp_ca_unsupp_cong_op *skel;

	err_str = "attach to unsupported member get_info";
	found = false;
	old_print_fn = libbpf_set_print(libbpf_debug_print);

	skel = tcp_ca_unsupp_cong_op__open_and_load();
	ASSERT_NULL(skel, "open_and_load");
	ASSERT_EQ(found, true, "expected_err_msg");

	tcp_ca_unsupp_cong_op__destroy(skel);
	libbpf_set_print(old_print_fn);
}

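/* Attach ca_update_1, then switch the link to ca_update_2 with
 * bpf_link__update_map() and verify that only ca_update_2 runs
 * afterwards (ca1_cnt stays put, ca2_cnt grows).
 */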
static void test_update_ca(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int saved_ca1_cnt;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	do_test("tcp_ca_update", NULL);
	saved_ca1_cnt = skel->bss->ca1_cnt;
	ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_update_2);
	ASSERT_OK(err, "update_map");

	do_test("tcp_ca_update", NULL);
	ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
	ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");

	bpf_link__destroy(link);
	tcp_ca_update__destroy(skel);
}

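/* Updating the link to the ca_wrong map should fail and leave
 * ca_update_1 attached (ca1_cnt keeps growing).
 */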
static void test_update_wrong(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int saved_ca1_cnt;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	do_test("tcp_ca_update", NULL);
	saved_ca1_cnt = skel->bss->ca1_cnt;
	ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_wrong);
	ASSERT_ERR(err, "update_map");

	do_test("tcp_ca_update", NULL);
	ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");

	bpf_link__destroy(link);
	tcp_ca_update__destroy(skel);
}

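/* Attach both ca_no_link and ca_update_1, then verify that the
 * ca_update_1 link cannot be updated to point at ca_no_link.
 */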
static void test_mixed_links(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link, *link_nl;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
	ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops");

	do_test("tcp_ca_update", NULL);
	ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt");

	err = bpf_link__update_map(link, skel->maps.ca_no_link);
	ASSERT_ERR(err, "update_map");

	bpf_link__destroy(link);
	bpf_link__destroy(link_nl);
	tcp_ca_update__destroy(skel);
}

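/* Creating a link from the same struct_ops map twice (attach, destroy,
 * attach again) should work.
 */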
static void test_multi_links(void)
{
	struct tcp_ca_update *skel;
	struct bpf_link *link;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_1st");
	bpf_link__destroy(link);

	/* The same struct_ops map should be usable for creating links
	 * multiple times.
	 */
	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
	bpf_link__destroy(link);

	tcp_ca_update__destroy(skel);
}

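/* Exercise bpf_link_update() with BPF_F_REPLACE: it should fail when
 * old_map_fd does not match the map backing the link and succeed when
 * it does.
 */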
static void test_link_replace(void)
{
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct tcp_ca_update *skel;
	struct bpf_link *link;
	int err;

	skel = tcp_ca_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
	ASSERT_OK_PTR(link, "attach_struct_ops_1st");
	bpf_link__destroy(link);

	link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
	ASSERT_OK_PTR(link, "attach_struct_ops_2nd");

	/* BPF_F_REPLACE with the wrong old map fd should fail.
	 *
	 * With BPF_F_REPLACE, the link is updated only if the old map
	 * fd given here matches the map currently backing the link.
	 */
	opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link),
			      bpf_map__fd(skel->maps.ca_update_1),
			      &opts);
	ASSERT_ERR(err, "bpf_link_update_fail");

	/* BPF_F_REPLACE with the correct old map fd should succeed. */
	opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2);
	err = bpf_link_update(bpf_link__fd(link),
			      bpf_map__fd(skel->maps.ca_update_1),
			      &opts);
	ASSERT_OK(err, "bpf_link_update_success");

	bpf_link__destroy(link);

	tcp_ca_update__destroy(skel);
}

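/* Entry point: run each of the scenarios above as its own subtest. */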
void test_bpf_tcp_ca(void)
{
	if (test__start_subtest("dctcp"))
		test_dctcp();
	if (test__start_subtest("cubic"))
		test_cubic();
	if (test__start_subtest("invalid_license"))
		test_invalid_license();
	if (test__start_subtest("dctcp_fallback"))
		test_dctcp_fallback();
	if (test__start_subtest("rel_setsockopt"))
		test_rel_setsockopt();
	if (test__start_subtest("write_sk_pacing"))
		test_write_sk_pacing();
	if (test__start_subtest("incompl_cong_ops"))
		test_incompl_cong_ops();
	if (test__start_subtest("unsupp_cong_op"))
		test_unsupp_cong_op();
	if (test__start_subtest("update_ca"))
		test_update_ca();
	if (test__start_subtest("update_wrong"))
		test_update_wrong();
	if (test__start_subtest("mixed_links"))
		test_mixed_links();
	if (test__start_subtest("multi_links"))
		test_multi_links();
	if (test__start_subtest("link_replace"))
		test_link_replace();
}