1 // SPDX-License-Identifier: GPL-2.0 2 // Copyright (c) 2020 Cloudflare 3 #include <error.h> 4 #include <netinet/tcp.h> 5 6 #include "test_progs.h" 7 #include "test_skmsg_load_helpers.skel.h" 8 #include "test_sockmap_update.skel.h" 9 #include "test_sockmap_invalid_update.skel.h" 10 #include "test_sockmap_skb_verdict_attach.skel.h" 11 #include "test_sockmap_progs_query.skel.h" 12 #include "bpf_iter_sockmap.skel.h" 13 14 #define TCP_REPAIR 19 /* TCP sock is under repair right now */ 15 16 #define TCP_REPAIR_ON 1 17 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ 18 19 static int connected_socket_v4(void) 20 { 21 struct sockaddr_in addr = { 22 .sin_family = AF_INET, 23 .sin_port = htons(80), 24 .sin_addr = { inet_addr("127.0.0.1") }, 25 }; 26 socklen_t len = sizeof(addr); 27 int s, repair, err; 28 29 s = socket(AF_INET, SOCK_STREAM, 0); 30 if (!ASSERT_GE(s, 0, "socket")) 31 goto error; 32 33 repair = TCP_REPAIR_ON; 34 err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 35 if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)")) 36 goto error; 37 38 err = connect(s, (struct sockaddr *)&addr, len); 39 if (!ASSERT_OK(err, "connect")) 40 goto error; 41 42 repair = TCP_REPAIR_OFF_NO_WP; 43 err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 44 if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)")) 45 goto error; 46 47 return s; 48 error: 49 perror(__func__); 50 close(s); 51 return -1; 52 } 53 54 static void compare_cookies(struct bpf_map *src, struct bpf_map *dst) 55 { 56 __u32 i, max_entries = bpf_map__max_entries(src); 57 int err, src_fd, dst_fd; 58 59 src_fd = bpf_map__fd(src); 60 dst_fd = bpf_map__fd(dst); 61 62 for (i = 0; i < max_entries; i++) { 63 __u64 src_cookie, dst_cookie; 64 65 err = bpf_map_lookup_elem(src_fd, &i, &src_cookie); 66 if (err && errno == ENOENT) { 67 err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie); 68 ASSERT_ERR(err, "map_lookup_elem(dst)"); 69 ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)"); 70 continue; 71 } 72 if (!ASSERT_OK(err, "lookup_elem(src)")) 73 continue; 74 75 err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie); 76 if (!ASSERT_OK(err, "lookup_elem(dst)")) 77 continue; 78 79 ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch"); 80 } 81 } 82 83 /* Create a map, populate it with one socket, and free the map. */ 84 static void test_sockmap_create_update_free(enum bpf_map_type map_type) 85 { 86 const int zero = 0; 87 int s, map, err; 88 89 s = connected_socket_v4(); 90 if (!ASSERT_GE(s, 0, "connected_socket_v4")) 91 return; 92 93 map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL); 94 if (!ASSERT_GE(map, 0, "bpf_map_create")) 95 goto out; 96 97 err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); 98 if (!ASSERT_OK(err, "bpf_map_update")) 99 goto out; 100 101 out: 102 close(map); 103 close(s); 104 } 105 106 static void test_skmsg_helpers(enum bpf_map_type map_type) 107 { 108 struct test_skmsg_load_helpers *skel; 109 int err, map, verdict; 110 111 skel = test_skmsg_load_helpers__open_and_load(); 112 if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load")) 113 return; 114 115 verdict = bpf_program__fd(skel->progs.prog_msg_verdict); 116 map = bpf_map__fd(skel->maps.sock_map); 117 118 err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0); 119 if (!ASSERT_OK(err, "bpf_prog_attach")) 120 goto out; 121 122 err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT); 123 if (!ASSERT_OK(err, "bpf_prog_detach2")) 124 goto out; 125 out: 126 test_skmsg_load_helpers__destroy(skel); 127 } 128 129 static void test_sockmap_update(enum bpf_map_type map_type) 130 { 131 int err, prog, src; 132 struct test_sockmap_update *skel; 133 struct bpf_map *dst_map; 134 const __u32 zero = 0; 135 char dummy[14] = {0}; 136 LIBBPF_OPTS(bpf_test_run_opts, topts, 137 .data_in = dummy, 138 .data_size_in = sizeof(dummy), 139 .repeat = 1, 140 ); 141 __s64 sk; 142 143 sk = connected_socket_v4(); 144 if (!ASSERT_NEQ(sk, -1, "connected_socket_v4")) 145 return; 146 147 skel = test_sockmap_update__open_and_load(); 148 if (!ASSERT_OK_PTR(skel, "open_and_load")) 149 goto close_sk; 150 151 prog = bpf_program__fd(skel->progs.copy_sock_map); 152 src = bpf_map__fd(skel->maps.src); 153 if (map_type == BPF_MAP_TYPE_SOCKMAP) 154 dst_map = skel->maps.dst_sock_map; 155 else 156 dst_map = skel->maps.dst_sock_hash; 157 158 err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST); 159 if (!ASSERT_OK(err, "update_elem(src)")) 160 goto out; 161 162 err = bpf_prog_test_run_opts(prog, &topts); 163 if (!ASSERT_OK(err, "test_run")) 164 goto out; 165 if (!ASSERT_NEQ(topts.retval, 0, "test_run retval")) 166 goto out; 167 168 compare_cookies(skel->maps.src, dst_map); 169 170 out: 171 test_sockmap_update__destroy(skel); 172 close_sk: 173 close(sk); 174 } 175 176 static void test_sockmap_invalid_update(void) 177 { 178 struct test_sockmap_invalid_update *skel; 179 180 skel = test_sockmap_invalid_update__open_and_load(); 181 if (!ASSERT_NULL(skel, "open_and_load")) 182 test_sockmap_invalid_update__destroy(skel); 183 } 184 185 static void test_sockmap_copy(enum bpf_map_type map_type) 186 { 187 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 188 int err, len, src_fd, iter_fd; 189 union bpf_iter_link_info linfo = {}; 190 __u32 i, num_sockets, num_elems; 191 struct bpf_iter_sockmap *skel; 192 __s64 *sock_fd = NULL; 193 struct bpf_link *link; 194 struct bpf_map *src; 195 char buf[64]; 196 197 skel = bpf_iter_sockmap__open_and_load(); 198 if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load")) 199 return; 200 201 if (map_type == BPF_MAP_TYPE_SOCKMAP) { 202 src = skel->maps.sockmap; 203 num_elems = bpf_map__max_entries(src); 204 num_sockets = num_elems - 1; 205 } else { 206 src = skel->maps.sockhash; 207 num_elems = bpf_map__max_entries(src) - 1; 208 num_sockets = num_elems; 209 } 210 211 sock_fd = calloc(num_sockets, sizeof(*sock_fd)); 212 if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)")) 213 goto out; 214 215 for (i = 0; i < num_sockets; i++) 216 sock_fd[i] = -1; 217 218 src_fd = bpf_map__fd(src); 219 220 for (i = 0; i < num_sockets; i++) { 221 sock_fd[i] = connected_socket_v4(); 222 if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4")) 223 goto out; 224 225 err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST); 226 if (!ASSERT_OK(err, "map_update")) 227 goto out; 228 } 229 230 linfo.map.map_fd = src_fd; 231 opts.link_info = &linfo; 232 opts.link_info_len = sizeof(linfo); 233 link = bpf_program__attach_iter(skel->progs.copy, &opts); 234 if (!ASSERT_OK_PTR(link, "attach_iter")) 235 goto out; 236 237 iter_fd = bpf_iter_create(bpf_link__fd(link)); 238 if (!ASSERT_GE(iter_fd, 0, "create_iter")) 239 goto free_link; 240 241 /* do some tests */ 242 while ((len = read(iter_fd, buf, sizeof(buf))) > 0) 243 ; 244 if (!ASSERT_GE(len, 0, "read")) 245 goto close_iter; 246 247 /* test results */ 248 if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems")) 249 goto close_iter; 250 251 if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks")) 252 goto close_iter; 253 254 compare_cookies(src, skel->maps.dst); 255 256 close_iter: 257 close(iter_fd); 258 free_link: 259 bpf_link__destroy(link); 260 out: 261 for (i = 0; sock_fd && i < num_sockets; i++) 262 if (sock_fd[i] >= 0) 263 close(sock_fd[i]); 264 if (sock_fd) 265 free(sock_fd); 266 bpf_iter_sockmap__destroy(skel); 267 } 268 269 static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first, 270 enum bpf_attach_type second) 271 { 272 struct test_sockmap_skb_verdict_attach *skel; 273 int err, map, verdict; 274 275 skel = test_sockmap_skb_verdict_attach__open_and_load(); 276 if (!ASSERT_OK_PTR(skel, "open_and_load")) 277 return; 278 279 verdict = bpf_program__fd(skel->progs.prog_skb_verdict); 280 map = bpf_map__fd(skel->maps.sock_map); 281 282 err = bpf_prog_attach(verdict, map, first, 0); 283 if (!ASSERT_OK(err, "bpf_prog_attach")) 284 goto out; 285 286 err = bpf_prog_attach(verdict, map, second, 0); 287 ASSERT_EQ(err, -EBUSY, "prog_attach_fail"); 288 289 err = bpf_prog_detach2(verdict, map, first); 290 if (!ASSERT_OK(err, "bpf_prog_detach2")) 291 goto out; 292 out: 293 test_sockmap_skb_verdict_attach__destroy(skel); 294 } 295 296 static __u32 query_prog_id(int prog_fd) 297 { 298 struct bpf_prog_info info = {}; 299 __u32 info_len = sizeof(info); 300 int err; 301 302 err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); 303 if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") || 304 !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd")) 305 return 0; 306 307 return info.id; 308 } 309 310 static void test_sockmap_progs_query(enum bpf_attach_type attach_type) 311 { 312 struct test_sockmap_progs_query *skel; 313 int err, map_fd, verdict_fd; 314 __u32 attach_flags = 0; 315 __u32 prog_ids[3] = {}; 316 __u32 prog_cnt = 3; 317 318 skel = test_sockmap_progs_query__open_and_load(); 319 if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load")) 320 return; 321 322 map_fd = bpf_map__fd(skel->maps.sock_map); 323 324 if (attach_type == BPF_SK_MSG_VERDICT) 325 verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict); 326 else 327 verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict); 328 329 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, 330 &attach_flags, prog_ids, &prog_cnt); 331 ASSERT_OK(err, "bpf_prog_query failed"); 332 ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query"); 333 ASSERT_EQ(prog_cnt, 0, "wrong program count on query"); 334 335 err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0); 336 if (!ASSERT_OK(err, "bpf_prog_attach failed")) 337 goto out; 338 339 prog_cnt = 1; 340 err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */, 341 &attach_flags, prog_ids, &prog_cnt); 342 ASSERT_OK(err, "bpf_prog_query failed"); 343 ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query"); 344 ASSERT_EQ(prog_cnt, 1, "wrong program count on query"); 345 ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd), 346 "wrong prog_ids on query"); 347 348 bpf_prog_detach2(verdict_fd, map_fd, attach_type); 349 out: 350 test_sockmap_progs_query__destroy(skel); 351 } 352 353 void test_sockmap_basic(void) 354 { 355 if (test__start_subtest("sockmap create_update_free")) 356 test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); 357 if (test__start_subtest("sockhash create_update_free")) 358 test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); 359 if (test__start_subtest("sockmap sk_msg load helpers")) 360 test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP); 361 if (test__start_subtest("sockhash sk_msg load helpers")) 362 test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH); 363 if (test__start_subtest("sockmap update")) 364 test_sockmap_update(BPF_MAP_TYPE_SOCKMAP); 365 if (test__start_subtest("sockhash update")) 366 test_sockmap_update(BPF_MAP_TYPE_SOCKHASH); 367 if (test__start_subtest("sockmap update in unsafe context")) 368 test_sockmap_invalid_update(); 369 if (test__start_subtest("sockmap copy")) 370 test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP); 371 if (test__start_subtest("sockhash copy")) 372 test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH); 373 if (test__start_subtest("sockmap skb_verdict attach")) { 374 test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT, 375 BPF_SK_SKB_STREAM_VERDICT); 376 test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT, 377 BPF_SK_SKB_VERDICT); 378 } 379 if (test__start_subtest("sockmap msg_verdict progs query")) 380 test_sockmap_progs_query(BPF_SK_MSG_VERDICT); 381 if (test__start_subtest("sockmap stream_parser progs query")) 382 test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER); 383 if (test__start_subtest("sockmap stream_verdict progs query")) 384 test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT); 385 if (test__start_subtest("sockmap skb_verdict progs query")) 386 test_sockmap_progs_query(BPF_SK_SKB_VERDICT); 387 } 388