1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2023 Isovalent */ 3 #include <uapi/linux/if_link.h> 4 #include <net/if.h> 5 #include <test_progs.h> 6 7 #define loopback 1 8 #define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null" 9 10 #include "test_tc_link.skel.h" 11 #include "tc_helpers.h" 12 13 void serial_test_tc_opts_basic(void) 14 { 15 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 16 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 17 LIBBPF_OPTS(bpf_prog_query_opts, optq); 18 __u32 fd1, fd2, id1, id2; 19 struct test_tc_link *skel; 20 __u32 prog_ids[2]; 21 int err; 22 23 skel = test_tc_link__open_and_load(); 24 if (!ASSERT_OK_PTR(skel, "skel_load")) 25 goto cleanup; 26 27 fd1 = bpf_program__fd(skel->progs.tc1); 28 fd2 = bpf_program__fd(skel->progs.tc2); 29 30 id1 = id_from_prog_fd(fd1); 31 id2 = id_from_prog_fd(fd2); 32 33 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 34 35 assert_mprog_count(BPF_TCX_INGRESS, 0); 36 assert_mprog_count(BPF_TCX_EGRESS, 0); 37 38 ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); 39 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 40 41 err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta); 42 if (!ASSERT_EQ(err, 0, "prog_attach")) 43 goto cleanup; 44 45 assert_mprog_count(BPF_TCX_INGRESS, 1); 46 assert_mprog_count(BPF_TCX_EGRESS, 0); 47 48 optq.prog_ids = prog_ids; 49 50 memset(prog_ids, 0, sizeof(prog_ids)); 51 optq.count = ARRAY_SIZE(prog_ids); 52 53 err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq); 54 if (!ASSERT_OK(err, "prog_query")) 55 goto cleanup_in; 56 57 ASSERT_EQ(optq.count, 1, "count"); 58 ASSERT_EQ(optq.revision, 2, "revision"); 59 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 60 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 61 62 ASSERT_OK(system(ping_cmd), ping_cmd); 63 64 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 65 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 66 67 err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta); 68 if (!ASSERT_EQ(err, 0, "prog_attach")) 69 goto cleanup_in; 70 71 assert_mprog_count(BPF_TCX_INGRESS, 1); 72 assert_mprog_count(BPF_TCX_EGRESS, 1); 73 74 memset(prog_ids, 0, sizeof(prog_ids)); 75 optq.count = ARRAY_SIZE(prog_ids); 76 77 err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq); 78 if (!ASSERT_OK(err, "prog_query")) 79 goto cleanup_eg; 80 81 ASSERT_EQ(optq.count, 1, "count"); 82 ASSERT_EQ(optq.revision, 2, "revision"); 83 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 84 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 85 86 ASSERT_OK(system(ping_cmd), ping_cmd); 87 88 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 89 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 90 91 cleanup_eg: 92 err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd); 93 ASSERT_OK(err, "prog_detach_eg"); 94 95 assert_mprog_count(BPF_TCX_INGRESS, 1); 96 assert_mprog_count(BPF_TCX_EGRESS, 0); 97 98 cleanup_in: 99 err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd); 100 ASSERT_OK(err, "prog_detach_in"); 101 102 assert_mprog_count(BPF_TCX_INGRESS, 0); 103 assert_mprog_count(BPF_TCX_EGRESS, 0); 104 105 cleanup: 106 test_tc_link__destroy(skel); 107 } 108 109 static void test_tc_opts_before_target(int target) 110 { 111 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 112 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 113 LIBBPF_OPTS(bpf_prog_query_opts, optq); 114 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 115 struct test_tc_link *skel; 116 __u32 prog_ids[5]; 117 int err; 118 119 skel = test_tc_link__open_and_load(); 120 if (!ASSERT_OK_PTR(skel, "skel_load")) 121 goto cleanup; 122 123 fd1 = bpf_program__fd(skel->progs.tc1); 124 fd2 = bpf_program__fd(skel->progs.tc2); 125 fd3 = bpf_program__fd(skel->progs.tc3); 126 fd4 = bpf_program__fd(skel->progs.tc4); 127 128 id1 = id_from_prog_fd(fd1); 129 id2 = id_from_prog_fd(fd2); 130 id3 = id_from_prog_fd(fd3); 131 id4 = id_from_prog_fd(fd4); 132 133 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 134 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 135 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 136 137 assert_mprog_count(target, 0); 138 139 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 140 if (!ASSERT_EQ(err, 0, "prog_attach")) 141 goto cleanup; 142 143 assert_mprog_count(target, 1); 144 145 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 146 if (!ASSERT_EQ(err, 0, "prog_attach")) 147 goto cleanup_target; 148 149 assert_mprog_count(target, 2); 150 151 optq.prog_ids = prog_ids; 152 153 memset(prog_ids, 0, sizeof(prog_ids)); 154 optq.count = ARRAY_SIZE(prog_ids); 155 156 err = bpf_prog_query_opts(loopback, target, &optq); 157 if (!ASSERT_OK(err, "prog_query")) 158 goto cleanup_target2; 159 160 ASSERT_EQ(optq.count, 2, "count"); 161 ASSERT_EQ(optq.revision, 3, "revision"); 162 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 163 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 164 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 165 166 ASSERT_OK(system(ping_cmd), ping_cmd); 167 168 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 169 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 170 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 171 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 172 173 LIBBPF_OPTS_RESET(opta, 174 .flags = BPF_F_BEFORE, 175 .relative_fd = fd2, 176 ); 177 178 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 179 if (!ASSERT_EQ(err, 0, "prog_attach")) 180 goto cleanup_target2; 181 182 memset(prog_ids, 0, sizeof(prog_ids)); 183 optq.count = ARRAY_SIZE(prog_ids); 184 185 err = bpf_prog_query_opts(loopback, target, &optq); 186 if (!ASSERT_OK(err, "prog_query")) 187 goto cleanup_target3; 188 189 ASSERT_EQ(optq.count, 3, "count"); 190 ASSERT_EQ(optq.revision, 4, "revision"); 191 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 192 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 193 ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); 194 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 195 196 LIBBPF_OPTS_RESET(opta, 197 .flags = BPF_F_BEFORE, 198 .relative_id = id1, 199 ); 200 201 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 202 if (!ASSERT_EQ(err, 0, "prog_attach")) 203 goto cleanup_target3; 204 205 assert_mprog_count(target, 4); 206 207 memset(prog_ids, 0, sizeof(prog_ids)); 208 optq.count = ARRAY_SIZE(prog_ids); 209 210 err = bpf_prog_query_opts(loopback, target, &optq); 211 if (!ASSERT_OK(err, "prog_query")) 212 goto cleanup_target4; 213 214 ASSERT_EQ(optq.count, 4, "count"); 215 ASSERT_EQ(optq.revision, 5, "revision"); 216 ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); 217 ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); 218 ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); 219 ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]"); 220 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 221 222 ASSERT_OK(system(ping_cmd), ping_cmd); 223 224 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 225 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 226 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 227 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 228 229 cleanup_target4: 230 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 231 ASSERT_OK(err, "prog_detach"); 232 assert_mprog_count(target, 3); 233 234 cleanup_target3: 235 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 236 ASSERT_OK(err, "prog_detach"); 237 assert_mprog_count(target, 2); 238 239 cleanup_target2: 240 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 241 ASSERT_OK(err, "prog_detach"); 242 assert_mprog_count(target, 1); 243 244 cleanup_target: 245 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 246 ASSERT_OK(err, "prog_detach"); 247 assert_mprog_count(target, 0); 248 249 cleanup: 250 test_tc_link__destroy(skel); 251 } 252 253 void serial_test_tc_opts_before(void) 254 { 255 test_tc_opts_before_target(BPF_TCX_INGRESS); 256 test_tc_opts_before_target(BPF_TCX_EGRESS); 257 } 258 259 static void test_tc_opts_after_target(int target) 260 { 261 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 262 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 263 LIBBPF_OPTS(bpf_prog_query_opts, optq); 264 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 265 struct test_tc_link *skel; 266 __u32 prog_ids[5]; 267 int err; 268 269 skel = test_tc_link__open_and_load(); 270 if (!ASSERT_OK_PTR(skel, "skel_load")) 271 goto cleanup; 272 273 fd1 = bpf_program__fd(skel->progs.tc1); 274 fd2 = bpf_program__fd(skel->progs.tc2); 275 fd3 = bpf_program__fd(skel->progs.tc3); 276 fd4 = bpf_program__fd(skel->progs.tc4); 277 278 id1 = id_from_prog_fd(fd1); 279 id2 = id_from_prog_fd(fd2); 280 id3 = id_from_prog_fd(fd3); 281 id4 = id_from_prog_fd(fd4); 282 283 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 284 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 285 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 286 287 assert_mprog_count(target, 0); 288 289 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 290 if (!ASSERT_EQ(err, 0, "prog_attach")) 291 goto cleanup; 292 293 assert_mprog_count(target, 1); 294 295 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 296 if (!ASSERT_EQ(err, 0, "prog_attach")) 297 goto cleanup_target; 298 299 assert_mprog_count(target, 2); 300 301 optq.prog_ids = prog_ids; 302 303 memset(prog_ids, 0, sizeof(prog_ids)); 304 optq.count = ARRAY_SIZE(prog_ids); 305 306 err = bpf_prog_query_opts(loopback, target, &optq); 307 if (!ASSERT_OK(err, "prog_query")) 308 goto cleanup_target2; 309 310 ASSERT_EQ(optq.count, 2, "count"); 311 ASSERT_EQ(optq.revision, 3, "revision"); 312 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 313 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 314 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 315 316 ASSERT_OK(system(ping_cmd), ping_cmd); 317 318 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 319 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 320 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 321 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 322 323 LIBBPF_OPTS_RESET(opta, 324 .flags = BPF_F_AFTER, 325 .relative_fd = fd1, 326 ); 327 328 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 329 if (!ASSERT_EQ(err, 0, "prog_attach")) 330 goto cleanup_target2; 331 332 memset(prog_ids, 0, sizeof(prog_ids)); 333 optq.count = ARRAY_SIZE(prog_ids); 334 335 err = bpf_prog_query_opts(loopback, target, &optq); 336 if (!ASSERT_OK(err, "prog_query")) 337 goto cleanup_target3; 338 339 ASSERT_EQ(optq.count, 3, "count"); 340 ASSERT_EQ(optq.revision, 4, "revision"); 341 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 342 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 343 ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); 344 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 345 346 LIBBPF_OPTS_RESET(opta, 347 .flags = BPF_F_AFTER, 348 .relative_id = id2, 349 ); 350 351 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 352 if (!ASSERT_EQ(err, 0, "prog_attach")) 353 goto cleanup_target3; 354 355 assert_mprog_count(target, 4); 356 357 memset(prog_ids, 0, sizeof(prog_ids)); 358 optq.count = ARRAY_SIZE(prog_ids); 359 360 err = bpf_prog_query_opts(loopback, target, &optq); 361 if (!ASSERT_OK(err, "prog_query")) 362 goto cleanup_target4; 363 364 ASSERT_EQ(optq.count, 4, "count"); 365 ASSERT_EQ(optq.revision, 5, "revision"); 366 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 367 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 368 ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); 369 ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); 370 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 371 372 ASSERT_OK(system(ping_cmd), ping_cmd); 373 374 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 375 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 376 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 377 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 378 379 cleanup_target4: 380 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 381 ASSERT_OK(err, "prog_detach"); 382 assert_mprog_count(target, 3); 383 384 memset(prog_ids, 0, sizeof(prog_ids)); 385 optq.count = ARRAY_SIZE(prog_ids); 386 387 err = bpf_prog_query_opts(loopback, target, &optq); 388 if (!ASSERT_OK(err, "prog_query")) 389 goto cleanup_target3; 390 391 ASSERT_EQ(optq.count, 3, "count"); 392 ASSERT_EQ(optq.revision, 6, "revision"); 393 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 394 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 395 ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); 396 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 397 398 cleanup_target3: 399 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 400 ASSERT_OK(err, "prog_detach"); 401 assert_mprog_count(target, 2); 402 403 memset(prog_ids, 0, sizeof(prog_ids)); 404 optq.count = ARRAY_SIZE(prog_ids); 405 406 err = bpf_prog_query_opts(loopback, target, &optq); 407 if (!ASSERT_OK(err, "prog_query")) 408 goto cleanup_target2; 409 410 ASSERT_EQ(optq.count, 2, "count"); 411 ASSERT_EQ(optq.revision, 7, "revision"); 412 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 413 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 414 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 415 416 cleanup_target2: 417 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 418 ASSERT_OK(err, "prog_detach"); 419 assert_mprog_count(target, 1); 420 421 memset(prog_ids, 0, sizeof(prog_ids)); 422 optq.count = ARRAY_SIZE(prog_ids); 423 424 err = bpf_prog_query_opts(loopback, target, &optq); 425 if (!ASSERT_OK(err, "prog_query")) 426 goto cleanup_target; 427 428 ASSERT_EQ(optq.count, 1, "count"); 429 ASSERT_EQ(optq.revision, 8, "revision"); 430 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 431 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 432 433 cleanup_target: 434 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 435 ASSERT_OK(err, "prog_detach"); 436 assert_mprog_count(target, 0); 437 438 cleanup: 439 test_tc_link__destroy(skel); 440 } 441 442 void serial_test_tc_opts_after(void) 443 { 444 test_tc_opts_after_target(BPF_TCX_INGRESS); 445 test_tc_opts_after_target(BPF_TCX_EGRESS); 446 } 447 448 static void test_tc_opts_revision_target(int target) 449 { 450 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 451 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 452 LIBBPF_OPTS(bpf_prog_query_opts, optq); 453 __u32 fd1, fd2, id1, id2; 454 struct test_tc_link *skel; 455 __u32 prog_ids[3]; 456 int err; 457 458 skel = test_tc_link__open_and_load(); 459 if (!ASSERT_OK_PTR(skel, "skel_load")) 460 goto cleanup; 461 462 fd1 = bpf_program__fd(skel->progs.tc1); 463 fd2 = bpf_program__fd(skel->progs.tc2); 464 465 id1 = id_from_prog_fd(fd1); 466 id2 = id_from_prog_fd(fd2); 467 468 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 469 470 assert_mprog_count(target, 0); 471 472 LIBBPF_OPTS_RESET(opta, 473 .expected_revision = 1, 474 ); 475 476 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 477 if (!ASSERT_EQ(err, 0, "prog_attach")) 478 goto cleanup; 479 480 assert_mprog_count(target, 1); 481 482 LIBBPF_OPTS_RESET(opta, 483 .expected_revision = 1, 484 ); 485 486 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 487 if (!ASSERT_EQ(err, -ESTALE, "prog_attach")) 488 goto cleanup_target; 489 490 assert_mprog_count(target, 1); 491 492 LIBBPF_OPTS_RESET(opta, 493 .expected_revision = 2, 494 ); 495 496 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 497 if (!ASSERT_EQ(err, 0, "prog_attach")) 498 goto cleanup_target; 499 500 assert_mprog_count(target, 2); 501 502 optq.prog_ids = prog_ids; 503 504 memset(prog_ids, 0, sizeof(prog_ids)); 505 optq.count = ARRAY_SIZE(prog_ids); 506 507 err = bpf_prog_query_opts(loopback, target, &optq); 508 if (!ASSERT_OK(err, "prog_query")) 509 goto cleanup_target2; 510 511 ASSERT_EQ(optq.count, 2, "count"); 512 ASSERT_EQ(optq.revision, 3, "revision"); 513 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 514 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 515 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 516 517 ASSERT_OK(system(ping_cmd), ping_cmd); 518 519 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 520 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 521 522 LIBBPF_OPTS_RESET(optd, 523 .expected_revision = 2, 524 ); 525 526 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 527 ASSERT_EQ(err, -ESTALE, "prog_detach"); 528 assert_mprog_count(target, 2); 529 530 cleanup_target2: 531 LIBBPF_OPTS_RESET(optd, 532 .expected_revision = 3, 533 ); 534 535 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 536 ASSERT_OK(err, "prog_detach"); 537 assert_mprog_count(target, 1); 538 539 cleanup_target: 540 LIBBPF_OPTS_RESET(optd); 541 542 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 543 ASSERT_OK(err, "prog_detach"); 544 assert_mprog_count(target, 0); 545 546 cleanup: 547 test_tc_link__destroy(skel); 548 } 549 550 void serial_test_tc_opts_revision(void) 551 { 552 test_tc_opts_revision_target(BPF_TCX_INGRESS); 553 test_tc_opts_revision_target(BPF_TCX_EGRESS); 554 } 555 556 static void test_tc_chain_classic(int target, bool chain_tc_old) 557 { 558 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); 559 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); 560 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 561 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 562 bool hook_created = false, tc_attached = false; 563 __u32 fd1, fd2, fd3, id1, id2, id3; 564 struct test_tc_link *skel; 565 int err; 566 567 skel = test_tc_link__open_and_load(); 568 if (!ASSERT_OK_PTR(skel, "skel_load")) 569 goto cleanup; 570 571 fd1 = bpf_program__fd(skel->progs.tc1); 572 fd2 = bpf_program__fd(skel->progs.tc2); 573 fd3 = bpf_program__fd(skel->progs.tc3); 574 575 id1 = id_from_prog_fd(fd1); 576 id2 = id_from_prog_fd(fd2); 577 id3 = id_from_prog_fd(fd3); 578 579 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 580 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 581 582 assert_mprog_count(target, 0); 583 584 if (chain_tc_old) { 585 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 586 BPF_TC_INGRESS : BPF_TC_EGRESS; 587 err = bpf_tc_hook_create(&tc_hook); 588 if (err == 0) 589 hook_created = true; 590 err = err == -EEXIST ? 0 : err; 591 if (!ASSERT_OK(err, "bpf_tc_hook_create")) 592 goto cleanup; 593 594 tc_opts.prog_fd = fd3; 595 err = bpf_tc_attach(&tc_hook, &tc_opts); 596 if (!ASSERT_OK(err, "bpf_tc_attach")) 597 goto cleanup; 598 tc_attached = true; 599 } 600 601 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 602 if (!ASSERT_EQ(err, 0, "prog_attach")) 603 goto cleanup; 604 605 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 606 if (!ASSERT_EQ(err, 0, "prog_attach")) 607 goto cleanup_detach; 608 609 assert_mprog_count(target, 2); 610 611 ASSERT_OK(system(ping_cmd), ping_cmd); 612 613 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 614 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 615 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 616 617 skel->bss->seen_tc1 = false; 618 skel->bss->seen_tc2 = false; 619 skel->bss->seen_tc3 = false; 620 621 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 622 if (!ASSERT_OK(err, "prog_detach")) 623 goto cleanup_detach; 624 625 assert_mprog_count(target, 1); 626 627 ASSERT_OK(system(ping_cmd), ping_cmd); 628 629 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 630 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 631 ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3"); 632 633 cleanup_detach: 634 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 635 if (!ASSERT_OK(err, "prog_detach")) 636 goto cleanup; 637 638 __assert_mprog_count(target, 0, chain_tc_old, loopback); 639 cleanup: 640 if (tc_attached) { 641 tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; 642 err = bpf_tc_detach(&tc_hook, &tc_opts); 643 ASSERT_OK(err, "bpf_tc_detach"); 644 } 645 if (hook_created) { 646 tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; 647 bpf_tc_hook_destroy(&tc_hook); 648 } 649 test_tc_link__destroy(skel); 650 assert_mprog_count(target, 0); 651 } 652 653 void serial_test_tc_opts_chain_classic(void) 654 { 655 test_tc_chain_classic(BPF_TCX_INGRESS, false); 656 test_tc_chain_classic(BPF_TCX_EGRESS, false); 657 test_tc_chain_classic(BPF_TCX_INGRESS, true); 658 test_tc_chain_classic(BPF_TCX_EGRESS, true); 659 } 660 661 static void test_tc_opts_replace_target(int target) 662 { 663 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 664 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 665 LIBBPF_OPTS(bpf_prog_query_opts, optq); 666 __u32 fd1, fd2, fd3, id1, id2, id3, detach_fd; 667 __u32 prog_ids[4], prog_flags[4]; 668 struct test_tc_link *skel; 669 int err; 670 671 skel = test_tc_link__open_and_load(); 672 if (!ASSERT_OK_PTR(skel, "skel_load")) 673 goto cleanup; 674 675 fd1 = bpf_program__fd(skel->progs.tc1); 676 fd2 = bpf_program__fd(skel->progs.tc2); 677 fd3 = bpf_program__fd(skel->progs.tc3); 678 679 id1 = id_from_prog_fd(fd1); 680 id2 = id_from_prog_fd(fd2); 681 id3 = id_from_prog_fd(fd3); 682 683 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 684 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 685 686 assert_mprog_count(target, 0); 687 688 LIBBPF_OPTS_RESET(opta, 689 .expected_revision = 1, 690 ); 691 692 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 693 if (!ASSERT_EQ(err, 0, "prog_attach")) 694 goto cleanup; 695 696 assert_mprog_count(target, 1); 697 698 LIBBPF_OPTS_RESET(opta, 699 .flags = BPF_F_BEFORE, 700 .relative_id = id1, 701 .expected_revision = 2, 702 ); 703 704 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 705 if (!ASSERT_EQ(err, 0, "prog_attach")) 706 goto cleanup_target; 707 708 detach_fd = fd2; 709 710 assert_mprog_count(target, 2); 711 712 optq.prog_attach_flags = prog_flags; 713 optq.prog_ids = prog_ids; 714 715 memset(prog_flags, 0, sizeof(prog_flags)); 716 memset(prog_ids, 0, sizeof(prog_ids)); 717 optq.count = ARRAY_SIZE(prog_ids); 718 719 err = bpf_prog_query_opts(loopback, target, &optq); 720 if (!ASSERT_OK(err, "prog_query")) 721 goto cleanup_target2; 722 723 ASSERT_EQ(optq.count, 2, "count"); 724 ASSERT_EQ(optq.revision, 3, "revision"); 725 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 726 ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); 727 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 728 729 ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); 730 ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); 731 ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); 732 733 ASSERT_OK(system(ping_cmd), ping_cmd); 734 735 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 736 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 737 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 738 739 skel->bss->seen_tc1 = false; 740 skel->bss->seen_tc2 = false; 741 skel->bss->seen_tc3 = false; 742 743 LIBBPF_OPTS_RESET(opta, 744 .flags = BPF_F_REPLACE, 745 .replace_prog_fd = fd2, 746 .expected_revision = 3, 747 ); 748 749 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 750 if (!ASSERT_EQ(err, 0, "prog_attach")) 751 goto cleanup_target2; 752 753 detach_fd = fd3; 754 755 assert_mprog_count(target, 2); 756 757 memset(prog_ids, 0, sizeof(prog_ids)); 758 optq.count = ARRAY_SIZE(prog_ids); 759 760 err = bpf_prog_query_opts(loopback, target, &optq); 761 if (!ASSERT_OK(err, "prog_query")) 762 goto cleanup_target2; 763 764 ASSERT_EQ(optq.count, 2, "count"); 765 ASSERT_EQ(optq.revision, 4, "revision"); 766 ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); 767 ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); 768 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 769 770 ASSERT_OK(system(ping_cmd), ping_cmd); 771 772 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 773 ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); 774 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 775 776 skel->bss->seen_tc1 = false; 777 skel->bss->seen_tc2 = false; 778 skel->bss->seen_tc3 = false; 779 780 LIBBPF_OPTS_RESET(opta, 781 .flags = BPF_F_REPLACE | BPF_F_BEFORE, 782 .replace_prog_fd = fd3, 783 .relative_fd = fd1, 784 .expected_revision = 4, 785 ); 786 787 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 788 if (!ASSERT_EQ(err, 0, "prog_attach")) 789 goto cleanup_target2; 790 791 detach_fd = fd2; 792 793 assert_mprog_count(target, 2); 794 795 memset(prog_ids, 0, sizeof(prog_ids)); 796 optq.count = ARRAY_SIZE(prog_ids); 797 798 err = bpf_prog_query_opts(loopback, target, &optq); 799 if (!ASSERT_OK(err, "prog_query")) 800 goto cleanup_target2; 801 802 ASSERT_EQ(optq.count, 2, "count"); 803 ASSERT_EQ(optq.revision, 5, "revision"); 804 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 805 ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); 806 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 807 808 ASSERT_OK(system(ping_cmd), ping_cmd); 809 810 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 811 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 812 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 813 814 LIBBPF_OPTS_RESET(opta, 815 .flags = BPF_F_REPLACE, 816 .replace_prog_fd = fd2, 817 ); 818 819 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 820 ASSERT_EQ(err, -EEXIST, "prog_attach"); 821 assert_mprog_count(target, 2); 822 823 LIBBPF_OPTS_RESET(opta, 824 .flags = BPF_F_REPLACE | BPF_F_AFTER, 825 .replace_prog_fd = fd2, 826 .relative_fd = fd1, 827 .expected_revision = 5, 828 ); 829 830 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 831 ASSERT_EQ(err, -ERANGE, "prog_attach"); 832 assert_mprog_count(target, 2); 833 834 LIBBPF_OPTS_RESET(opta, 835 .flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE, 836 .replace_prog_fd = fd2, 837 .relative_fd = fd1, 838 .expected_revision = 5, 839 ); 840 841 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 842 ASSERT_EQ(err, -ERANGE, "prog_attach"); 843 assert_mprog_count(target, 2); 844 845 LIBBPF_OPTS_RESET(optd, 846 .flags = BPF_F_BEFORE, 847 .relative_id = id1, 848 .expected_revision = 5, 849 ); 850 851 cleanup_target2: 852 err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); 853 ASSERT_OK(err, "prog_detach"); 854 assert_mprog_count(target, 1); 855 856 cleanup_target: 857 LIBBPF_OPTS_RESET(optd); 858 859 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 860 ASSERT_OK(err, "prog_detach"); 861 assert_mprog_count(target, 0); 862 863 cleanup: 864 test_tc_link__destroy(skel); 865 } 866 867 void serial_test_tc_opts_replace(void) 868 { 869 test_tc_opts_replace_target(BPF_TCX_INGRESS); 870 test_tc_opts_replace_target(BPF_TCX_EGRESS); 871 } 872 873 static void test_tc_opts_invalid_target(int target) 874 { 875 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 876 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 877 __u32 fd1, fd2, id1, id2; 878 struct test_tc_link *skel; 879 int err; 880 881 skel = test_tc_link__open_and_load(); 882 if (!ASSERT_OK_PTR(skel, "skel_load")) 883 goto cleanup; 884 885 fd1 = bpf_program__fd(skel->progs.tc1); 886 fd2 = bpf_program__fd(skel->progs.tc2); 887 888 id1 = id_from_prog_fd(fd1); 889 id2 = id_from_prog_fd(fd2); 890 891 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 892 893 assert_mprog_count(target, 0); 894 895 LIBBPF_OPTS_RESET(opta, 896 .flags = BPF_F_BEFORE | BPF_F_AFTER, 897 ); 898 899 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 900 ASSERT_EQ(err, -ERANGE, "prog_attach"); 901 assert_mprog_count(target, 0); 902 903 LIBBPF_OPTS_RESET(opta, 904 .flags = BPF_F_BEFORE | BPF_F_ID, 905 ); 906 907 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 908 ASSERT_EQ(err, -ENOENT, "prog_attach"); 909 assert_mprog_count(target, 0); 910 911 LIBBPF_OPTS_RESET(opta, 912 .flags = BPF_F_AFTER | BPF_F_ID, 913 ); 914 915 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 916 ASSERT_EQ(err, -ENOENT, "prog_attach"); 917 assert_mprog_count(target, 0); 918 919 LIBBPF_OPTS_RESET(opta, 920 .relative_fd = fd2, 921 ); 922 923 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 924 ASSERT_EQ(err, -EINVAL, "prog_attach"); 925 assert_mprog_count(target, 0); 926 927 LIBBPF_OPTS_RESET(opta, 928 .flags = BPF_F_BEFORE | BPF_F_AFTER, 929 .relative_fd = fd2, 930 ); 931 932 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 933 ASSERT_EQ(err, -ENOENT, "prog_attach"); 934 assert_mprog_count(target, 0); 935 936 LIBBPF_OPTS_RESET(opta, 937 .flags = BPF_F_ID, 938 .relative_id = id2, 939 ); 940 941 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 942 ASSERT_EQ(err, -EINVAL, "prog_attach"); 943 assert_mprog_count(target, 0); 944 945 LIBBPF_OPTS_RESET(opta, 946 .flags = BPF_F_BEFORE, 947 .relative_fd = fd1, 948 ); 949 950 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 951 ASSERT_EQ(err, -ENOENT, "prog_attach"); 952 assert_mprog_count(target, 0); 953 954 LIBBPF_OPTS_RESET(opta, 955 .flags = BPF_F_AFTER, 956 .relative_fd = fd1, 957 ); 958 959 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 960 ASSERT_EQ(err, -ENOENT, "prog_attach"); 961 assert_mprog_count(target, 0); 962 963 LIBBPF_OPTS_RESET(opta); 964 965 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 966 if (!ASSERT_EQ(err, 0, "prog_attach")) 967 goto cleanup; 968 969 assert_mprog_count(target, 1); 970 971 LIBBPF_OPTS_RESET(opta); 972 973 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 974 ASSERT_EQ(err, -EEXIST, "prog_attach"); 975 assert_mprog_count(target, 1); 976 977 LIBBPF_OPTS_RESET(opta, 978 .flags = BPF_F_BEFORE, 979 .relative_fd = fd1, 980 ); 981 982 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 983 ASSERT_EQ(err, -EEXIST, "prog_attach"); 984 assert_mprog_count(target, 1); 985 986 LIBBPF_OPTS_RESET(opta, 987 .flags = BPF_F_AFTER, 988 .relative_fd = fd1, 989 ); 990 991 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 992 ASSERT_EQ(err, -EEXIST, "prog_attach"); 993 assert_mprog_count(target, 1); 994 995 LIBBPF_OPTS_RESET(opta, 996 .flags = BPF_F_REPLACE, 997 .relative_fd = fd1, 998 ); 999 1000 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1001 ASSERT_EQ(err, -EINVAL, "prog_attach_x1"); 1002 assert_mprog_count(target, 1); 1003 1004 LIBBPF_OPTS_RESET(opta, 1005 .flags = BPF_F_REPLACE, 1006 .replace_prog_fd = fd1, 1007 ); 1008 1009 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1010 ASSERT_EQ(err, -EEXIST, "prog_attach"); 1011 assert_mprog_count(target, 1); 1012 1013 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1014 ASSERT_OK(err, "prog_detach"); 1015 assert_mprog_count(target, 0); 1016 cleanup: 1017 test_tc_link__destroy(skel); 1018 } 1019 1020 void serial_test_tc_opts_invalid(void) 1021 { 1022 test_tc_opts_invalid_target(BPF_TCX_INGRESS); 1023 test_tc_opts_invalid_target(BPF_TCX_EGRESS); 1024 } 1025 1026 static void test_tc_opts_prepend_target(int target) 1027 { 1028 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1029 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1030 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1031 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 1032 struct test_tc_link *skel; 1033 __u32 prog_ids[5]; 1034 int err; 1035 1036 skel = test_tc_link__open_and_load(); 1037 if (!ASSERT_OK_PTR(skel, "skel_load")) 1038 goto cleanup; 1039 1040 fd1 = bpf_program__fd(skel->progs.tc1); 1041 fd2 = bpf_program__fd(skel->progs.tc2); 1042 fd3 = bpf_program__fd(skel->progs.tc3); 1043 fd4 = bpf_program__fd(skel->progs.tc4); 1044 1045 id1 = id_from_prog_fd(fd1); 1046 id2 = id_from_prog_fd(fd2); 1047 id3 = id_from_prog_fd(fd3); 1048 id4 = id_from_prog_fd(fd4); 1049 1050 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 1051 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 1052 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 1053 1054 assert_mprog_count(target, 0); 1055 1056 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1057 if (!ASSERT_EQ(err, 0, "prog_attach")) 1058 goto cleanup; 1059 1060 assert_mprog_count(target, 1); 1061 1062 LIBBPF_OPTS_RESET(opta, 1063 .flags = BPF_F_BEFORE, 1064 ); 1065 1066 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 1067 if (!ASSERT_EQ(err, 0, "prog_attach")) 1068 goto cleanup_target; 1069 1070 assert_mprog_count(target, 2); 1071 1072 optq.prog_ids = prog_ids; 1073 1074 memset(prog_ids, 0, sizeof(prog_ids)); 1075 optq.count = ARRAY_SIZE(prog_ids); 1076 1077 err = bpf_prog_query_opts(loopback, target, &optq); 1078 if (!ASSERT_OK(err, "prog_query")) 1079 goto cleanup_target2; 1080 1081 ASSERT_EQ(optq.count, 2, "count"); 1082 ASSERT_EQ(optq.revision, 3, "revision"); 1083 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 1084 ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]"); 1085 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1086 1087 ASSERT_OK(system(ping_cmd), ping_cmd); 1088 1089 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1090 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1091 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 1092 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1093 1094 LIBBPF_OPTS_RESET(opta, 1095 .flags = BPF_F_BEFORE, 1096 ); 1097 1098 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 1099 if (!ASSERT_EQ(err, 0, "prog_attach")) 1100 goto cleanup_target2; 1101 1102 LIBBPF_OPTS_RESET(opta, 1103 .flags = BPF_F_BEFORE, 1104 ); 1105 1106 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 1107 if (!ASSERT_EQ(err, 0, "prog_attach")) 1108 goto cleanup_target3; 1109 1110 assert_mprog_count(target, 4); 1111 1112 memset(prog_ids, 0, sizeof(prog_ids)); 1113 optq.count = ARRAY_SIZE(prog_ids); 1114 1115 err = bpf_prog_query_opts(loopback, target, &optq); 1116 if (!ASSERT_OK(err, "prog_query")) 1117 goto cleanup_target4; 1118 1119 ASSERT_EQ(optq.count, 4, "count"); 1120 ASSERT_EQ(optq.revision, 5, "revision"); 1121 ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); 1122 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 1123 ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]"); 1124 ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]"); 1125 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1126 1127 ASSERT_OK(system(ping_cmd), ping_cmd); 1128 1129 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1130 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1131 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 1132 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 1133 1134 cleanup_target4: 1135 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 1136 ASSERT_OK(err, "prog_detach"); 1137 assert_mprog_count(target, 3); 1138 1139 cleanup_target3: 1140 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1141 ASSERT_OK(err, "prog_detach"); 1142 assert_mprog_count(target, 2); 1143 1144 cleanup_target2: 1145 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1146 ASSERT_OK(err, "prog_detach"); 1147 assert_mprog_count(target, 1); 1148 1149 cleanup_target: 1150 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1151 ASSERT_OK(err, "prog_detach"); 1152 assert_mprog_count(target, 0); 1153 1154 cleanup: 1155 test_tc_link__destroy(skel); 1156 } 1157 1158 void serial_test_tc_opts_prepend(void) 1159 { 1160 test_tc_opts_prepend_target(BPF_TCX_INGRESS); 1161 test_tc_opts_prepend_target(BPF_TCX_EGRESS); 1162 } 1163 1164 static void test_tc_opts_append_target(int target) 1165 { 1166 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1167 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1168 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1169 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 1170 struct test_tc_link *skel; 1171 __u32 prog_ids[5]; 1172 int err; 1173 1174 skel = test_tc_link__open_and_load(); 1175 if (!ASSERT_OK_PTR(skel, "skel_load")) 1176 goto cleanup; 1177 1178 fd1 = bpf_program__fd(skel->progs.tc1); 1179 fd2 = bpf_program__fd(skel->progs.tc2); 1180 fd3 = bpf_program__fd(skel->progs.tc3); 1181 fd4 = bpf_program__fd(skel->progs.tc4); 1182 1183 id1 = id_from_prog_fd(fd1); 1184 id2 = id_from_prog_fd(fd2); 1185 id3 = id_from_prog_fd(fd3); 1186 id4 = id_from_prog_fd(fd4); 1187 1188 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 1189 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 1190 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 1191 1192 assert_mprog_count(target, 0); 1193 1194 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1195 if (!ASSERT_EQ(err, 0, "prog_attach")) 1196 goto cleanup; 1197 1198 assert_mprog_count(target, 1); 1199 1200 LIBBPF_OPTS_RESET(opta, 1201 .flags = BPF_F_AFTER, 1202 ); 1203 1204 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 1205 if (!ASSERT_EQ(err, 0, "prog_attach")) 1206 goto cleanup_target; 1207 1208 assert_mprog_count(target, 2); 1209 1210 optq.prog_ids = prog_ids; 1211 1212 memset(prog_ids, 0, sizeof(prog_ids)); 1213 optq.count = ARRAY_SIZE(prog_ids); 1214 1215 err = bpf_prog_query_opts(loopback, target, &optq); 1216 if (!ASSERT_OK(err, "prog_query")) 1217 goto cleanup_target2; 1218 1219 ASSERT_EQ(optq.count, 2, "count"); 1220 ASSERT_EQ(optq.revision, 3, "revision"); 1221 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 1222 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 1223 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1224 1225 ASSERT_OK(system(ping_cmd), ping_cmd); 1226 1227 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1228 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1229 ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3"); 1230 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 1231 1232 LIBBPF_OPTS_RESET(opta, 1233 .flags = BPF_F_AFTER, 1234 ); 1235 1236 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 1237 if (!ASSERT_EQ(err, 0, "prog_attach")) 1238 goto cleanup_target2; 1239 1240 LIBBPF_OPTS_RESET(opta, 1241 .flags = BPF_F_AFTER, 1242 ); 1243 1244 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 1245 if (!ASSERT_EQ(err, 0, "prog_attach")) 1246 goto cleanup_target3; 1247 1248 assert_mprog_count(target, 4); 1249 1250 memset(prog_ids, 0, sizeof(prog_ids)); 1251 optq.count = ARRAY_SIZE(prog_ids); 1252 1253 err = bpf_prog_query_opts(loopback, target, &optq); 1254 if (!ASSERT_OK(err, "prog_query")) 1255 goto cleanup_target4; 1256 1257 ASSERT_EQ(optq.count, 4, "count"); 1258 ASSERT_EQ(optq.revision, 5, "revision"); 1259 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 1260 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 1261 ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); 1262 ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); 1263 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1264 1265 ASSERT_OK(system(ping_cmd), ping_cmd); 1266 1267 ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); 1268 ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); 1269 ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3"); 1270 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 1271 1272 cleanup_target4: 1273 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 1274 ASSERT_OK(err, "prog_detach"); 1275 assert_mprog_count(target, 3); 1276 1277 cleanup_target3: 1278 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1279 ASSERT_OK(err, "prog_detach"); 1280 assert_mprog_count(target, 2); 1281 1282 cleanup_target2: 1283 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1284 ASSERT_OK(err, "prog_detach"); 1285 assert_mprog_count(target, 1); 1286 1287 cleanup_target: 1288 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1289 ASSERT_OK(err, "prog_detach"); 1290 assert_mprog_count(target, 0); 1291 1292 cleanup: 1293 test_tc_link__destroy(skel); 1294 } 1295 1296 void serial_test_tc_opts_append(void) 1297 { 1298 test_tc_opts_append_target(BPF_TCX_INGRESS); 1299 test_tc_opts_append_target(BPF_TCX_EGRESS); 1300 } 1301 1302 static void test_tc_opts_dev_cleanup_target(int target) 1303 { 1304 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1305 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1306 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1307 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 1308 struct test_tc_link *skel; 1309 int err, ifindex; 1310 1311 ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); 1312 ifindex = if_nametoindex("tcx_opts1"); 1313 ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); 1314 1315 skel = test_tc_link__open_and_load(); 1316 if (!ASSERT_OK_PTR(skel, "skel_load")) 1317 goto cleanup; 1318 1319 fd1 = bpf_program__fd(skel->progs.tc1); 1320 fd2 = bpf_program__fd(skel->progs.tc2); 1321 fd3 = bpf_program__fd(skel->progs.tc3); 1322 fd4 = bpf_program__fd(skel->progs.tc4); 1323 1324 id1 = id_from_prog_fd(fd1); 1325 id2 = id_from_prog_fd(fd2); 1326 id3 = id_from_prog_fd(fd3); 1327 id4 = id_from_prog_fd(fd4); 1328 1329 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 1330 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 1331 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 1332 1333 assert_mprog_count_ifindex(ifindex, target, 0); 1334 1335 err = bpf_prog_attach_opts(fd1, ifindex, target, &opta); 1336 if (!ASSERT_EQ(err, 0, "prog_attach")) 1337 goto cleanup; 1338 1339 assert_mprog_count_ifindex(ifindex, target, 1); 1340 1341 err = bpf_prog_attach_opts(fd2, ifindex, target, &opta); 1342 if (!ASSERT_EQ(err, 0, "prog_attach")) 1343 goto cleanup1; 1344 1345 assert_mprog_count_ifindex(ifindex, target, 2); 1346 1347 err = bpf_prog_attach_opts(fd3, ifindex, target, &opta); 1348 if (!ASSERT_EQ(err, 0, "prog_attach")) 1349 goto cleanup2; 1350 1351 assert_mprog_count_ifindex(ifindex, target, 3); 1352 1353 err = bpf_prog_attach_opts(fd4, ifindex, target, &opta); 1354 if (!ASSERT_EQ(err, 0, "prog_attach")) 1355 goto cleanup3; 1356 1357 assert_mprog_count_ifindex(ifindex, target, 4); 1358 1359 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1360 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1361 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1362 return; 1363 cleanup3: 1364 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1365 ASSERT_OK(err, "prog_detach"); 1366 1367 assert_mprog_count_ifindex(ifindex, target, 2); 1368 cleanup2: 1369 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1370 ASSERT_OK(err, "prog_detach"); 1371 1372 assert_mprog_count_ifindex(ifindex, target, 1); 1373 cleanup1: 1374 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1375 ASSERT_OK(err, "prog_detach"); 1376 1377 assert_mprog_count_ifindex(ifindex, target, 0); 1378 cleanup: 1379 test_tc_link__destroy(skel); 1380 1381 ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); 1382 ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); 1383 ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); 1384 } 1385 1386 void serial_test_tc_opts_dev_cleanup(void) 1387 { 1388 test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS); 1389 test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS); 1390 } 1391 1392 static void test_tc_opts_mixed_target(int target) 1393 { 1394 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1395 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1396 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1397 LIBBPF_OPTS(bpf_tcx_opts, optl); 1398 __u32 pid1, pid2, pid3, pid4, lid2, lid4; 1399 __u32 prog_flags[4], link_flags[4]; 1400 __u32 prog_ids[4], link_ids[4]; 1401 struct test_tc_link *skel; 1402 struct bpf_link *link; 1403 int err, detach_fd; 1404 1405 skel = test_tc_link__open(); 1406 if (!ASSERT_OK_PTR(skel, "skel_open")) 1407 goto cleanup; 1408 1409 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1410 0, "tc1_attach_type"); 1411 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1412 0, "tc2_attach_type"); 1413 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target), 1414 0, "tc3_attach_type"); 1415 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target), 1416 0, "tc4_attach_type"); 1417 1418 err = test_tc_link__load(skel); 1419 if (!ASSERT_OK(err, "skel_load")) 1420 goto cleanup; 1421 1422 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1423 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1424 pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3)); 1425 pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4)); 1426 1427 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1428 ASSERT_NEQ(pid3, pid4, "prog_ids_3_4"); 1429 ASSERT_NEQ(pid2, pid3, "prog_ids_2_3"); 1430 1431 assert_mprog_count(target, 0); 1432 1433 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), 1434 loopback, target, &opta); 1435 if (!ASSERT_EQ(err, 0, "prog_attach")) 1436 goto cleanup; 1437 1438 detach_fd = bpf_program__fd(skel->progs.tc1); 1439 1440 assert_mprog_count(target, 1); 1441 1442 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1443 if (!ASSERT_OK_PTR(link, "link_attach")) 1444 goto cleanup1; 1445 skel->links.tc2 = link; 1446 1447 lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); 1448 1449 assert_mprog_count(target, 2); 1450 1451 LIBBPF_OPTS_RESET(opta, 1452 .flags = BPF_F_REPLACE, 1453 .replace_prog_fd = bpf_program__fd(skel->progs.tc1), 1454 ); 1455 1456 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), 1457 loopback, target, &opta); 1458 ASSERT_EQ(err, -EEXIST, "prog_attach"); 1459 1460 assert_mprog_count(target, 2); 1461 1462 LIBBPF_OPTS_RESET(opta, 1463 .flags = BPF_F_REPLACE, 1464 .replace_prog_fd = bpf_program__fd(skel->progs.tc2), 1465 ); 1466 1467 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), 1468 loopback, target, &opta); 1469 ASSERT_EQ(err, -EEXIST, "prog_attach"); 1470 1471 assert_mprog_count(target, 2); 1472 1473 LIBBPF_OPTS_RESET(opta, 1474 .flags = BPF_F_REPLACE, 1475 .replace_prog_fd = bpf_program__fd(skel->progs.tc2), 1476 ); 1477 1478 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), 1479 loopback, target, &opta); 1480 ASSERT_EQ(err, -EBUSY, "prog_attach"); 1481 1482 assert_mprog_count(target, 2); 1483 1484 LIBBPF_OPTS_RESET(opta, 1485 .flags = BPF_F_REPLACE, 1486 .replace_prog_fd = bpf_program__fd(skel->progs.tc1), 1487 ); 1488 1489 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3), 1490 loopback, target, &opta); 1491 if (!ASSERT_EQ(err, 0, "prog_attach")) 1492 goto cleanup1; 1493 1494 detach_fd = bpf_program__fd(skel->progs.tc3); 1495 1496 assert_mprog_count(target, 2); 1497 1498 link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl); 1499 if (!ASSERT_OK_PTR(link, "link_attach")) 1500 goto cleanup1; 1501 skel->links.tc4 = link; 1502 1503 lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4)); 1504 1505 assert_mprog_count(target, 3); 1506 1507 LIBBPF_OPTS_RESET(opta, 1508 .flags = BPF_F_REPLACE, 1509 .replace_prog_fd = bpf_program__fd(skel->progs.tc4), 1510 ); 1511 1512 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2), 1513 loopback, target, &opta); 1514 ASSERT_EQ(err, -EEXIST, "prog_attach"); 1515 1516 optq.prog_ids = prog_ids; 1517 optq.prog_attach_flags = prog_flags; 1518 optq.link_ids = link_ids; 1519 optq.link_attach_flags = link_flags; 1520 1521 memset(prog_ids, 0, sizeof(prog_ids)); 1522 memset(prog_flags, 0, sizeof(prog_flags)); 1523 memset(link_ids, 0, sizeof(link_ids)); 1524 memset(link_flags, 0, sizeof(link_flags)); 1525 optq.count = ARRAY_SIZE(prog_ids); 1526 1527 err = bpf_prog_query_opts(loopback, target, &optq); 1528 if (!ASSERT_OK(err, "prog_query")) 1529 goto cleanup1; 1530 1531 ASSERT_EQ(optq.count, 3, "count"); 1532 ASSERT_EQ(optq.revision, 5, "revision"); 1533 ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]"); 1534 ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]"); 1535 ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]"); 1536 ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]"); 1537 ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]"); 1538 ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]"); 1539 ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]"); 1540 ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]"); 1541 ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]"); 1542 ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]"); 1543 ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]"); 1544 ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]"); 1545 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 1546 ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]"); 1547 ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]"); 1548 ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]"); 1549 1550 ASSERT_OK(system(ping_cmd), ping_cmd); 1551 1552 cleanup1: 1553 err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); 1554 ASSERT_OK(err, "prog_detach"); 1555 assert_mprog_count(target, 2); 1556 1557 cleanup: 1558 test_tc_link__destroy(skel); 1559 assert_mprog_count(target, 0); 1560 } 1561 1562 void serial_test_tc_opts_mixed(void) 1563 { 1564 test_tc_opts_mixed_target(BPF_TCX_INGRESS); 1565 test_tc_opts_mixed_target(BPF_TCX_EGRESS); 1566 } 1567 1568 static void test_tc_opts_demixed_target(int target) 1569 { 1570 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1571 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1572 LIBBPF_OPTS(bpf_tcx_opts, optl); 1573 struct test_tc_link *skel; 1574 struct bpf_link *link; 1575 __u32 pid1, pid2; 1576 int err; 1577 1578 skel = test_tc_link__open(); 1579 if (!ASSERT_OK_PTR(skel, "skel_open")) 1580 goto cleanup; 1581 1582 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target), 1583 0, "tc1_attach_type"); 1584 ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target), 1585 0, "tc2_attach_type"); 1586 1587 err = test_tc_link__load(skel); 1588 if (!ASSERT_OK(err, "skel_load")) 1589 goto cleanup; 1590 1591 pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); 1592 pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); 1593 ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); 1594 1595 assert_mprog_count(target, 0); 1596 1597 err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1), 1598 loopback, target, &opta); 1599 if (!ASSERT_EQ(err, 0, "prog_attach")) 1600 goto cleanup; 1601 1602 assert_mprog_count(target, 1); 1603 1604 link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl); 1605 if (!ASSERT_OK_PTR(link, "link_attach")) 1606 goto cleanup1; 1607 skel->links.tc2 = link; 1608 1609 assert_mprog_count(target, 2); 1610 1611 LIBBPF_OPTS_RESET(optd, 1612 .flags = BPF_F_AFTER, 1613 ); 1614 1615 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1616 ASSERT_EQ(err, -EBUSY, "prog_detach"); 1617 1618 assert_mprog_count(target, 2); 1619 1620 LIBBPF_OPTS_RESET(optd, 1621 .flags = BPF_F_BEFORE, 1622 ); 1623 1624 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1625 ASSERT_OK(err, "prog_detach"); 1626 1627 assert_mprog_count(target, 1); 1628 goto cleanup; 1629 1630 cleanup1: 1631 err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1), 1632 loopback, target, &optd); 1633 ASSERT_OK(err, "prog_detach"); 1634 assert_mprog_count(target, 2); 1635 1636 cleanup: 1637 test_tc_link__destroy(skel); 1638 assert_mprog_count(target, 0); 1639 } 1640 1641 void serial_test_tc_opts_demixed(void) 1642 { 1643 test_tc_opts_demixed_target(BPF_TCX_INGRESS); 1644 test_tc_opts_demixed_target(BPF_TCX_EGRESS); 1645 } 1646 1647 static void test_tc_opts_detach_target(int target) 1648 { 1649 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1650 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1651 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1652 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 1653 struct test_tc_link *skel; 1654 __u32 prog_ids[5]; 1655 int err; 1656 1657 skel = test_tc_link__open_and_load(); 1658 if (!ASSERT_OK_PTR(skel, "skel_load")) 1659 goto cleanup; 1660 1661 fd1 = bpf_program__fd(skel->progs.tc1); 1662 fd2 = bpf_program__fd(skel->progs.tc2); 1663 fd3 = bpf_program__fd(skel->progs.tc3); 1664 fd4 = bpf_program__fd(skel->progs.tc4); 1665 1666 id1 = id_from_prog_fd(fd1); 1667 id2 = id_from_prog_fd(fd2); 1668 id3 = id_from_prog_fd(fd3); 1669 id4 = id_from_prog_fd(fd4); 1670 1671 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 1672 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 1673 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 1674 1675 assert_mprog_count(target, 0); 1676 1677 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1678 if (!ASSERT_EQ(err, 0, "prog_attach")) 1679 goto cleanup; 1680 1681 assert_mprog_count(target, 1); 1682 1683 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 1684 if (!ASSERT_EQ(err, 0, "prog_attach")) 1685 goto cleanup1; 1686 1687 assert_mprog_count(target, 2); 1688 1689 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 1690 if (!ASSERT_EQ(err, 0, "prog_attach")) 1691 goto cleanup2; 1692 1693 assert_mprog_count(target, 3); 1694 1695 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 1696 if (!ASSERT_EQ(err, 0, "prog_attach")) 1697 goto cleanup3; 1698 1699 assert_mprog_count(target, 4); 1700 1701 optq.prog_ids = prog_ids; 1702 1703 memset(prog_ids, 0, sizeof(prog_ids)); 1704 optq.count = ARRAY_SIZE(prog_ids); 1705 1706 err = bpf_prog_query_opts(loopback, target, &optq); 1707 if (!ASSERT_OK(err, "prog_query")) 1708 goto cleanup4; 1709 1710 ASSERT_EQ(optq.count, 4, "count"); 1711 ASSERT_EQ(optq.revision, 5, "revision"); 1712 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 1713 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 1714 ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); 1715 ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); 1716 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1717 1718 LIBBPF_OPTS_RESET(optd, 1719 .flags = BPF_F_BEFORE, 1720 ); 1721 1722 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1723 ASSERT_OK(err, "prog_detach"); 1724 1725 assert_mprog_count(target, 3); 1726 1727 memset(prog_ids, 0, sizeof(prog_ids)); 1728 optq.count = ARRAY_SIZE(prog_ids); 1729 1730 err = bpf_prog_query_opts(loopback, target, &optq); 1731 if (!ASSERT_OK(err, "prog_query")) 1732 goto cleanup4; 1733 1734 ASSERT_EQ(optq.count, 3, "count"); 1735 ASSERT_EQ(optq.revision, 6, "revision"); 1736 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 1737 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 1738 ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); 1739 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 1740 1741 LIBBPF_OPTS_RESET(optd, 1742 .flags = BPF_F_AFTER, 1743 ); 1744 1745 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1746 ASSERT_OK(err, "prog_detach"); 1747 1748 assert_mprog_count(target, 2); 1749 1750 memset(prog_ids, 0, sizeof(prog_ids)); 1751 optq.count = ARRAY_SIZE(prog_ids); 1752 1753 err = bpf_prog_query_opts(loopback, target, &optq); 1754 if (!ASSERT_OK(err, "prog_query")) 1755 goto cleanup4; 1756 1757 ASSERT_EQ(optq.count, 2, "count"); 1758 ASSERT_EQ(optq.revision, 7, "revision"); 1759 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 1760 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 1761 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1762 1763 LIBBPF_OPTS_RESET(optd); 1764 1765 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1766 ASSERT_OK(err, "prog_detach"); 1767 assert_mprog_count(target, 1); 1768 1769 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1770 ASSERT_OK(err, "prog_detach"); 1771 assert_mprog_count(target, 0); 1772 1773 LIBBPF_OPTS_RESET(optd, 1774 .flags = BPF_F_BEFORE, 1775 ); 1776 1777 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1778 ASSERT_EQ(err, -ENOENT, "prog_detach"); 1779 1780 LIBBPF_OPTS_RESET(optd, 1781 .flags = BPF_F_AFTER, 1782 ); 1783 1784 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1785 ASSERT_EQ(err, -ENOENT, "prog_detach"); 1786 goto cleanup; 1787 1788 cleanup4: 1789 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 1790 ASSERT_OK(err, "prog_detach"); 1791 assert_mprog_count(target, 3); 1792 1793 cleanup3: 1794 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 1795 ASSERT_OK(err, "prog_detach"); 1796 assert_mprog_count(target, 2); 1797 1798 cleanup2: 1799 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1800 ASSERT_OK(err, "prog_detach"); 1801 assert_mprog_count(target, 1); 1802 1803 cleanup1: 1804 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1805 ASSERT_OK(err, "prog_detach"); 1806 assert_mprog_count(target, 0); 1807 1808 cleanup: 1809 test_tc_link__destroy(skel); 1810 } 1811 1812 void serial_test_tc_opts_detach(void) 1813 { 1814 test_tc_opts_detach_target(BPF_TCX_INGRESS); 1815 test_tc_opts_detach_target(BPF_TCX_EGRESS); 1816 } 1817 1818 static void test_tc_opts_detach_before_target(int target) 1819 { 1820 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 1821 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 1822 LIBBPF_OPTS(bpf_prog_query_opts, optq); 1823 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 1824 struct test_tc_link *skel; 1825 __u32 prog_ids[5]; 1826 int err; 1827 1828 skel = test_tc_link__open_and_load(); 1829 if (!ASSERT_OK_PTR(skel, "skel_load")) 1830 goto cleanup; 1831 1832 fd1 = bpf_program__fd(skel->progs.tc1); 1833 fd2 = bpf_program__fd(skel->progs.tc2); 1834 fd3 = bpf_program__fd(skel->progs.tc3); 1835 fd4 = bpf_program__fd(skel->progs.tc4); 1836 1837 id1 = id_from_prog_fd(fd1); 1838 id2 = id_from_prog_fd(fd2); 1839 id3 = id_from_prog_fd(fd3); 1840 id4 = id_from_prog_fd(fd4); 1841 1842 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 1843 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 1844 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 1845 1846 assert_mprog_count(target, 0); 1847 1848 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 1849 if (!ASSERT_EQ(err, 0, "prog_attach")) 1850 goto cleanup; 1851 1852 assert_mprog_count(target, 1); 1853 1854 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 1855 if (!ASSERT_EQ(err, 0, "prog_attach")) 1856 goto cleanup1; 1857 1858 assert_mprog_count(target, 2); 1859 1860 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 1861 if (!ASSERT_EQ(err, 0, "prog_attach")) 1862 goto cleanup2; 1863 1864 assert_mprog_count(target, 3); 1865 1866 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 1867 if (!ASSERT_EQ(err, 0, "prog_attach")) 1868 goto cleanup3; 1869 1870 assert_mprog_count(target, 4); 1871 1872 optq.prog_ids = prog_ids; 1873 1874 memset(prog_ids, 0, sizeof(prog_ids)); 1875 optq.count = ARRAY_SIZE(prog_ids); 1876 1877 err = bpf_prog_query_opts(loopback, target, &optq); 1878 if (!ASSERT_OK(err, "prog_query")) 1879 goto cleanup4; 1880 1881 ASSERT_EQ(optq.count, 4, "count"); 1882 ASSERT_EQ(optq.revision, 5, "revision"); 1883 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 1884 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 1885 ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); 1886 ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); 1887 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 1888 1889 LIBBPF_OPTS_RESET(optd, 1890 .flags = BPF_F_BEFORE, 1891 .relative_fd = fd2, 1892 ); 1893 1894 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1895 ASSERT_OK(err, "prog_detach"); 1896 1897 assert_mprog_count(target, 3); 1898 1899 memset(prog_ids, 0, sizeof(prog_ids)); 1900 optq.count = ARRAY_SIZE(prog_ids); 1901 1902 err = bpf_prog_query_opts(loopback, target, &optq); 1903 if (!ASSERT_OK(err, "prog_query")) 1904 goto cleanup4; 1905 1906 ASSERT_EQ(optq.count, 3, "count"); 1907 ASSERT_EQ(optq.revision, 6, "revision"); 1908 ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]"); 1909 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 1910 ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); 1911 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 1912 1913 LIBBPF_OPTS_RESET(optd, 1914 .flags = BPF_F_BEFORE, 1915 .relative_fd = fd2, 1916 ); 1917 1918 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 1919 ASSERT_EQ(err, -ENOENT, "prog_detach"); 1920 assert_mprog_count(target, 3); 1921 1922 LIBBPF_OPTS_RESET(optd, 1923 .flags = BPF_F_BEFORE, 1924 .relative_fd = fd4, 1925 ); 1926 1927 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1928 ASSERT_EQ(err, -ERANGE, "prog_detach"); 1929 assert_mprog_count(target, 3); 1930 1931 LIBBPF_OPTS_RESET(optd, 1932 .flags = BPF_F_BEFORE, 1933 .relative_fd = fd1, 1934 ); 1935 1936 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1937 ASSERT_EQ(err, -ENOENT, "prog_detach"); 1938 assert_mprog_count(target, 3); 1939 1940 LIBBPF_OPTS_RESET(optd, 1941 .flags = BPF_F_BEFORE, 1942 .relative_fd = fd3, 1943 ); 1944 1945 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 1946 ASSERT_OK(err, "prog_detach"); 1947 1948 assert_mprog_count(target, 2); 1949 1950 memset(prog_ids, 0, sizeof(prog_ids)); 1951 optq.count = ARRAY_SIZE(prog_ids); 1952 1953 err = bpf_prog_query_opts(loopback, target, &optq); 1954 if (!ASSERT_OK(err, "prog_query")) 1955 goto cleanup4; 1956 1957 ASSERT_EQ(optq.count, 2, "count"); 1958 ASSERT_EQ(optq.revision, 7, "revision"); 1959 ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]"); 1960 ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); 1961 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 1962 1963 LIBBPF_OPTS_RESET(optd, 1964 .flags = BPF_F_BEFORE, 1965 .relative_fd = fd4, 1966 ); 1967 1968 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1969 ASSERT_OK(err, "prog_detach"); 1970 1971 assert_mprog_count(target, 1); 1972 1973 memset(prog_ids, 0, sizeof(prog_ids)); 1974 optq.count = ARRAY_SIZE(prog_ids); 1975 1976 err = bpf_prog_query_opts(loopback, target, &optq); 1977 if (!ASSERT_OK(err, "prog_query")) 1978 goto cleanup4; 1979 1980 ASSERT_EQ(optq.count, 1, "count"); 1981 ASSERT_EQ(optq.revision, 8, "revision"); 1982 ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]"); 1983 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 1984 1985 LIBBPF_OPTS_RESET(optd, 1986 .flags = BPF_F_BEFORE, 1987 ); 1988 1989 err = bpf_prog_detach_opts(0, loopback, target, &optd); 1990 ASSERT_OK(err, "prog_detach"); 1991 1992 assert_mprog_count(target, 0); 1993 goto cleanup; 1994 1995 cleanup4: 1996 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 1997 ASSERT_OK(err, "prog_detach"); 1998 assert_mprog_count(target, 3); 1999 2000 cleanup3: 2001 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 2002 ASSERT_OK(err, "prog_detach"); 2003 assert_mprog_count(target, 2); 2004 2005 cleanup2: 2006 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 2007 ASSERT_OK(err, "prog_detach"); 2008 assert_mprog_count(target, 1); 2009 2010 cleanup1: 2011 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 2012 ASSERT_OK(err, "prog_detach"); 2013 assert_mprog_count(target, 0); 2014 2015 cleanup: 2016 test_tc_link__destroy(skel); 2017 } 2018 2019 void serial_test_tc_opts_detach_before(void) 2020 { 2021 test_tc_opts_detach_before_target(BPF_TCX_INGRESS); 2022 test_tc_opts_detach_before_target(BPF_TCX_EGRESS); 2023 } 2024 2025 static void test_tc_opts_detach_after_target(int target) 2026 { 2027 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 2028 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 2029 LIBBPF_OPTS(bpf_prog_query_opts, optq); 2030 __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; 2031 struct test_tc_link *skel; 2032 __u32 prog_ids[5]; 2033 int err; 2034 2035 skel = test_tc_link__open_and_load(); 2036 if (!ASSERT_OK_PTR(skel, "skel_load")) 2037 goto cleanup; 2038 2039 fd1 = bpf_program__fd(skel->progs.tc1); 2040 fd2 = bpf_program__fd(skel->progs.tc2); 2041 fd3 = bpf_program__fd(skel->progs.tc3); 2042 fd4 = bpf_program__fd(skel->progs.tc4); 2043 2044 id1 = id_from_prog_fd(fd1); 2045 id2 = id_from_prog_fd(fd2); 2046 id3 = id_from_prog_fd(fd3); 2047 id4 = id_from_prog_fd(fd4); 2048 2049 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 2050 ASSERT_NEQ(id3, id4, "prog_ids_3_4"); 2051 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 2052 2053 assert_mprog_count(target, 0); 2054 2055 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 2056 if (!ASSERT_EQ(err, 0, "prog_attach")) 2057 goto cleanup; 2058 2059 assert_mprog_count(target, 1); 2060 2061 err = bpf_prog_attach_opts(fd2, loopback, target, &opta); 2062 if (!ASSERT_EQ(err, 0, "prog_attach")) 2063 goto cleanup1; 2064 2065 assert_mprog_count(target, 2); 2066 2067 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 2068 if (!ASSERT_EQ(err, 0, "prog_attach")) 2069 goto cleanup2; 2070 2071 assert_mprog_count(target, 3); 2072 2073 err = bpf_prog_attach_opts(fd4, loopback, target, &opta); 2074 if (!ASSERT_EQ(err, 0, "prog_attach")) 2075 goto cleanup3; 2076 2077 assert_mprog_count(target, 4); 2078 2079 optq.prog_ids = prog_ids; 2080 2081 memset(prog_ids, 0, sizeof(prog_ids)); 2082 optq.count = ARRAY_SIZE(prog_ids); 2083 2084 err = bpf_prog_query_opts(loopback, target, &optq); 2085 if (!ASSERT_OK(err, "prog_query")) 2086 goto cleanup4; 2087 2088 ASSERT_EQ(optq.count, 4, "count"); 2089 ASSERT_EQ(optq.revision, 5, "revision"); 2090 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 2091 ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]"); 2092 ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]"); 2093 ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]"); 2094 ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]"); 2095 2096 LIBBPF_OPTS_RESET(optd, 2097 .flags = BPF_F_AFTER, 2098 .relative_fd = fd1, 2099 ); 2100 2101 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 2102 ASSERT_OK(err, "prog_detach"); 2103 2104 assert_mprog_count(target, 3); 2105 2106 memset(prog_ids, 0, sizeof(prog_ids)); 2107 optq.count = ARRAY_SIZE(prog_ids); 2108 2109 err = bpf_prog_query_opts(loopback, target, &optq); 2110 if (!ASSERT_OK(err, "prog_query")) 2111 goto cleanup4; 2112 2113 ASSERT_EQ(optq.count, 3, "count"); 2114 ASSERT_EQ(optq.revision, 6, "revision"); 2115 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 2116 ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]"); 2117 ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]"); 2118 ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]"); 2119 2120 LIBBPF_OPTS_RESET(optd, 2121 .flags = BPF_F_AFTER, 2122 .relative_fd = fd1, 2123 ); 2124 2125 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 2126 ASSERT_EQ(err, -ENOENT, "prog_detach"); 2127 assert_mprog_count(target, 3); 2128 2129 LIBBPF_OPTS_RESET(optd, 2130 .flags = BPF_F_AFTER, 2131 .relative_fd = fd4, 2132 ); 2133 2134 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 2135 ASSERT_EQ(err, -ERANGE, "prog_detach"); 2136 assert_mprog_count(target, 3); 2137 2138 LIBBPF_OPTS_RESET(optd, 2139 .flags = BPF_F_AFTER, 2140 .relative_fd = fd3, 2141 ); 2142 2143 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 2144 ASSERT_EQ(err, -ERANGE, "prog_detach"); 2145 assert_mprog_count(target, 3); 2146 2147 LIBBPF_OPTS_RESET(optd, 2148 .flags = BPF_F_AFTER, 2149 .relative_fd = fd1, 2150 ); 2151 2152 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 2153 ASSERT_EQ(err, -ERANGE, "prog_detach"); 2154 assert_mprog_count(target, 3); 2155 2156 LIBBPF_OPTS_RESET(optd, 2157 .flags = BPF_F_AFTER, 2158 .relative_fd = fd1, 2159 ); 2160 2161 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 2162 ASSERT_OK(err, "prog_detach"); 2163 2164 assert_mprog_count(target, 2); 2165 2166 memset(prog_ids, 0, sizeof(prog_ids)); 2167 optq.count = ARRAY_SIZE(prog_ids); 2168 2169 err = bpf_prog_query_opts(loopback, target, &optq); 2170 if (!ASSERT_OK(err, "prog_query")) 2171 goto cleanup4; 2172 2173 ASSERT_EQ(optq.count, 2, "count"); 2174 ASSERT_EQ(optq.revision, 7, "revision"); 2175 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 2176 ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]"); 2177 ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); 2178 2179 LIBBPF_OPTS_RESET(optd, 2180 .flags = BPF_F_AFTER, 2181 .relative_fd = fd1, 2182 ); 2183 2184 err = bpf_prog_detach_opts(0, loopback, target, &optd); 2185 ASSERT_OK(err, "prog_detach"); 2186 2187 assert_mprog_count(target, 1); 2188 2189 memset(prog_ids, 0, sizeof(prog_ids)); 2190 optq.count = ARRAY_SIZE(prog_ids); 2191 2192 err = bpf_prog_query_opts(loopback, target, &optq); 2193 if (!ASSERT_OK(err, "prog_query")) 2194 goto cleanup4; 2195 2196 ASSERT_EQ(optq.count, 1, "count"); 2197 ASSERT_EQ(optq.revision, 8, "revision"); 2198 ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]"); 2199 ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); 2200 2201 LIBBPF_OPTS_RESET(optd, 2202 .flags = BPF_F_AFTER, 2203 ); 2204 2205 err = bpf_prog_detach_opts(0, loopback, target, &optd); 2206 ASSERT_OK(err, "prog_detach"); 2207 2208 assert_mprog_count(target, 0); 2209 goto cleanup; 2210 2211 cleanup4: 2212 err = bpf_prog_detach_opts(fd4, loopback, target, &optd); 2213 ASSERT_OK(err, "prog_detach"); 2214 assert_mprog_count(target, 3); 2215 2216 cleanup3: 2217 err = bpf_prog_detach_opts(fd3, loopback, target, &optd); 2218 ASSERT_OK(err, "prog_detach"); 2219 assert_mprog_count(target, 2); 2220 2221 cleanup2: 2222 err = bpf_prog_detach_opts(fd2, loopback, target, &optd); 2223 ASSERT_OK(err, "prog_detach"); 2224 assert_mprog_count(target, 1); 2225 2226 cleanup1: 2227 err = bpf_prog_detach_opts(fd1, loopback, target, &optd); 2228 ASSERT_OK(err, "prog_detach"); 2229 assert_mprog_count(target, 0); 2230 2231 cleanup: 2232 test_tc_link__destroy(skel); 2233 } 2234 2235 void serial_test_tc_opts_detach_after(void) 2236 { 2237 test_tc_opts_detach_after_target(BPF_TCX_INGRESS); 2238 test_tc_opts_detach_after_target(BPF_TCX_EGRESS); 2239 } 2240 2241 static void test_tc_opts_delete_empty(int target, bool chain_tc_old) 2242 { 2243 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); 2244 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 2245 int err; 2246 2247 assert_mprog_count(target, 0); 2248 if (chain_tc_old) { 2249 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 2250 BPF_TC_INGRESS : BPF_TC_EGRESS; 2251 err = bpf_tc_hook_create(&tc_hook); 2252 ASSERT_OK(err, "bpf_tc_hook_create"); 2253 __assert_mprog_count(target, 0, true, loopback); 2254 } 2255 err = bpf_prog_detach_opts(0, loopback, target, &optd); 2256 ASSERT_EQ(err, -ENOENT, "prog_detach"); 2257 if (chain_tc_old) { 2258 tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; 2259 bpf_tc_hook_destroy(&tc_hook); 2260 } 2261 assert_mprog_count(target, 0); 2262 } 2263 2264 void serial_test_tc_opts_delete_empty(void) 2265 { 2266 test_tc_opts_delete_empty(BPF_TCX_INGRESS, false); 2267 test_tc_opts_delete_empty(BPF_TCX_EGRESS, false); 2268 test_tc_opts_delete_empty(BPF_TCX_INGRESS, true); 2269 test_tc_opts_delete_empty(BPF_TCX_EGRESS, true); 2270 } 2271 2272 static void test_tc_chain_mixed(int target) 2273 { 2274 LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); 2275 LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback); 2276 LIBBPF_OPTS(bpf_prog_attach_opts, opta); 2277 LIBBPF_OPTS(bpf_prog_detach_opts, optd); 2278 __u32 fd1, fd2, fd3, id1, id2, id3; 2279 struct test_tc_link *skel; 2280 int err, detach_fd; 2281 2282 skel = test_tc_link__open_and_load(); 2283 if (!ASSERT_OK_PTR(skel, "skel_load")) 2284 goto cleanup; 2285 2286 fd1 = bpf_program__fd(skel->progs.tc4); 2287 fd2 = bpf_program__fd(skel->progs.tc5); 2288 fd3 = bpf_program__fd(skel->progs.tc6); 2289 2290 id1 = id_from_prog_fd(fd1); 2291 id2 = id_from_prog_fd(fd2); 2292 id3 = id_from_prog_fd(fd3); 2293 2294 ASSERT_NEQ(id1, id2, "prog_ids_1_2"); 2295 ASSERT_NEQ(id2, id3, "prog_ids_2_3"); 2296 2297 assert_mprog_count(target, 0); 2298 2299 tc_hook.attach_point = target == BPF_TCX_INGRESS ? 2300 BPF_TC_INGRESS : BPF_TC_EGRESS; 2301 err = bpf_tc_hook_create(&tc_hook); 2302 err = err == -EEXIST ? 0 : err; 2303 if (!ASSERT_OK(err, "bpf_tc_hook_create")) 2304 goto cleanup; 2305 2306 tc_opts.prog_fd = fd2; 2307 err = bpf_tc_attach(&tc_hook, &tc_opts); 2308 if (!ASSERT_OK(err, "bpf_tc_attach")) 2309 goto cleanup_hook; 2310 2311 err = bpf_prog_attach_opts(fd3, loopback, target, &opta); 2312 if (!ASSERT_EQ(err, 0, "prog_attach")) 2313 goto cleanup_filter; 2314 2315 detach_fd = fd3; 2316 2317 assert_mprog_count(target, 1); 2318 2319 ASSERT_OK(system(ping_cmd), ping_cmd); 2320 2321 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 2322 ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5"); 2323 ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6"); 2324 2325 skel->bss->seen_tc4 = false; 2326 skel->bss->seen_tc5 = false; 2327 skel->bss->seen_tc6 = false; 2328 2329 LIBBPF_OPTS_RESET(opta, 2330 .flags = BPF_F_REPLACE, 2331 .replace_prog_fd = fd3, 2332 ); 2333 2334 err = bpf_prog_attach_opts(fd1, loopback, target, &opta); 2335 if (!ASSERT_EQ(err, 0, "prog_attach")) 2336 goto cleanup_opts; 2337 2338 detach_fd = fd1; 2339 2340 assert_mprog_count(target, 1); 2341 2342 ASSERT_OK(system(ping_cmd), ping_cmd); 2343 2344 ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4"); 2345 ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); 2346 ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); 2347 2348 skel->bss->seen_tc4 = false; 2349 skel->bss->seen_tc5 = false; 2350 skel->bss->seen_tc6 = false; 2351 2352 cleanup_opts: 2353 err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd); 2354 ASSERT_OK(err, "prog_detach"); 2355 __assert_mprog_count(target, 0, true, loopback); 2356 2357 ASSERT_OK(system(ping_cmd), ping_cmd); 2358 2359 ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4"); 2360 ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5"); 2361 ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6"); 2362 2363 cleanup_filter: 2364 tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; 2365 err = bpf_tc_detach(&tc_hook, &tc_opts); 2366 ASSERT_OK(err, "bpf_tc_detach"); 2367 2368 cleanup_hook: 2369 tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; 2370 bpf_tc_hook_destroy(&tc_hook); 2371 2372 cleanup: 2373 test_tc_link__destroy(skel); 2374 } 2375 2376 void serial_test_tc_opts_chain_mixed(void) 2377 { 2378 test_tc_chain_mixed(BPF_TCX_INGRESS); 2379 test_tc_chain_mixed(BPF_TCX_EGRESS); 2380 } 2381