// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <error.h>
#include <netinet/tcp.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "bpf_iter_sockmap.skel.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

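/* Open a TCP socket and put it into repair mode before connect()ing, so the
 * kernel moves it to ESTABLISHED without sending any packets. This gives the
 * tests a "connected" socket to insert into sockmaps without needing a peer
 * that actually listens on 127.0.0.1:80.
 */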
static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (CHECK_FAIL(s == -1))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (CHECK_FAIL(err))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (CHECK_FAIL(err))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (CHECK_FAIL(err))
		goto error;

	return s;
error:
	perror(__func__);
	close(s);
	return -1;
}

static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, duration = 0, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			CHECK(!err, "map_lookup_elem(dst)", "element %u not deleted\n", i);
			CHECK(err && errno != ENOENT, "map_lookup_elem(dst)", "%s\n",
			      strerror(errno));
			continue;
		}
		if (CHECK(err, "lookup_elem(src)", "%s\n", strerror(errno)))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (CHECK(err, "lookup_elem(dst)", "%s\n", strerror(errno)))
			continue;

		CHECK(dst_cookie != src_cookie, "cookie mismatch",
		      "%llu != %llu (pos %u)\n", dst_cookie, src_cookie, i);
	}
}

/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (CHECK_FAIL(s < 0))
		return;

	map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0);
	if (CHECK_FAIL(map < 0)) {
		perror("bpf_create_map");
		goto out;
	}

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (CHECK_FAIL(err)) {
		perror("bpf_map_update");
		goto out;
	}

out:
	close(map);
	close(s);
}

static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (CHECK_FAIL(!skel)) {
		perror("test_skmsg_load_helpers__open_and_load");
		return;
	}

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_attach");
		goto out;
	}

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_detach2");
		goto out;
	}
out:
	test_skmsg_load_helpers__destroy(skel);
}

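/* Insert a connected socket into the "src" map, run a BPF program via
 * BPF_PROG_TEST_RUN that copies it into the destination sockmap or sockhash,
 * and verify the result by comparing socket cookies.
 */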
static void test_sockmap_update(enum bpf_map_type map_type)
{
	struct bpf_prog_test_run_attr tattr;
	int err, prog, src, duration = 0;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	__s64 sk;

	sk = connected_socket_v4();
	if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (CHECK(!skel, "open_and_load", "cannot load skeleton\n"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
		goto out;

	tattr = (struct bpf_prog_test_run_attr){
		.prog_fd = prog,
		.repeat = 1,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
	};

	err = bpf_prog_test_run_xattr(&tattr);
	if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
		       "errno=%u retval=%u\n", errno, tattr.retval))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

/* The verifier must reject programs that update a sockmap from an unsafe
 * context, so loading this skeleton is expected to fail.
 */
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;
	int duration = 0;

	skel = test_sockmap_invalid_update__open_and_load();
	if (CHECK(skel, "open_and_load", "verifier accepted map_update\n"))
		test_sockmap_invalid_update__destroy(skel);
}

static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd, duration = 0;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
		return;

	/* A sockmap iterator visits every slot, including the one left empty
	 * below, while a sockhash iterator only yields inserted entries, so
	 * the expected element counts differ.
	 */
	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (CHECK(!sock_fd, "calloc(sock_fd)", "failed to allocate\n"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* Drain the iterator so the copy program visits every element */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
		goto close_iter;

	/* Check the counts reported by the iterator program */
	if (CHECK(skel->bss->elems != num_elems, "elems", "got %u expected %u\n",
		  skel->bss->elems, num_elems))
		goto close_iter;

	if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
		  skel->bss->socks, num_sockets))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	if (sock_fd)
		free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

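/* Only one skb verdict program can be attached to a sockmap at a time:
 * attaching a second program with a different attach type must fail with
 * -EBUSY.
 */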
static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (CHECK_FAIL(!skel)) {
		perror("test_sockmap_skb_verdict_attach__open_and_load");
		return;
	}

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_attach");
		goto out;
	}

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (CHECK_FAIL(err)) {
		perror("bpf_prog_detach2");
		goto out;
	}
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
}