// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
#include <uapi/linux/if_link.h>
#include <net/if.h>
#include <test_progs.h>

#define loopback 1
#define ping_cmd "ping -q -c1 -w1 127.0.0.1 > /dev/null"

#include "test_tc_link.skel.h"
#include "tc_helpers.h"

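/* Sanity check for tcx opts attachments: attaches tc1 to ingress and
 * tc2 to egress of loopback, verifies the query results (count,
 * revision, prog IDs) and that both programs see traffic from ping.
 */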
void serial_test_tc_opts_basic(void)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	__u32 prog_ids[2];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	assert_mprog_count(BPF_TCX_INGRESS, 0);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

	ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");

	err = bpf_prog_attach_opts(fd1, loopback, BPF_TCX_INGRESS, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_in;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 2, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");

	err = bpf_prog_attach_opts(fd2, loopback, BPF_TCX_EGRESS, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_in;

	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, BPF_TCX_EGRESS, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_eg;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 2, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");

cleanup_eg:
	err = bpf_prog_detach_opts(fd2, loopback, BPF_TCX_EGRESS, &optd);
	ASSERT_OK(err, "prog_detach_eg");

	assert_mprog_count(BPF_TCX_INGRESS, 1);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

cleanup_in:
	err = bpf_prog_detach_opts(fd1, loopback, BPF_TCX_INGRESS, &optd);
	ASSERT_OK(err, "prog_detach_in");

	assert_mprog_count(BPF_TCX_INGRESS, 0);
	assert_mprog_count(BPF_TCX_EGRESS, 0);

cleanup:
	test_tc_link__destroy(skel);
}

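/* Attaches tc1 and tc2, then inserts tc3 with BPF_F_BEFORE relative to
 * fd2 and tc4 with BPF_F_BEFORE relative to id1, verifying the
 * resulting order tc4, tc1, tc3, tc2 via query and packet traversal.
 */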
static void test_tc_opts_before_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_before(void)
{
	test_tc_opts_before_target(BPF_TCX_INGRESS);
	test_tc_opts_before_target(BPF_TCX_EGRESS);
}

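/* Counterpart of the BPF_F_BEFORE test: inserts tc3 with BPF_F_AFTER
 * relative to fd1 and tc4 with BPF_F_AFTER relative to id2, checking
 * the order tc1, tc3, tc2, tc4 and the revision bumps on detach.
 */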
static void test_tc_opts_after_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target3;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 6, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 7, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target;

	ASSERT_EQ(optq.count, 1, "count");
	ASSERT_EQ(optq.revision, 8, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_after(void)
{
	test_tc_opts_after_target(BPF_TCX_INGRESS);
	test_tc_opts_after_target(BPF_TCX_EGRESS);
}

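/* Exercises expected_revision handling: attach/detach with a stale
 * revision must fail with -ESTALE, while the matching revision
 * succeeds and bumps the revision counter.
 */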
static void test_tc_opts_revision_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	__u32 prog_ids[3];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, -ESTALE, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");

	LIBBPF_OPTS_RESET(optd,
		.expected_revision = 2,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_EQ(err, -ESTALE, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	LIBBPF_OPTS_RESET(optd,
		.expected_revision = 3,
	);

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	LIBBPF_OPTS_RESET(optd);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_revision(void)
{
	test_tc_opts_revision_target(BPF_TCX_INGRESS);
	test_tc_opts_revision_target(BPF_TCX_EGRESS);
}

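/* Verifies that tcx programs coexist with the classic tc path:
 * optionally attaches tc3 via bpf_tc_hook_create()/bpf_tc_attach() and
 * checks that it still runs alongside the tcx-attached tc1/tc2.
 */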
static void test_tc_chain_classic(int target, bool chain_tc_old)
{
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	bool hook_created = false, tc_attached = false;
	__u32 fd1, fd2, fd3, id1, id2, id3;
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	if (chain_tc_old) {
		tc_hook.attach_point = target == BPF_TCX_INGRESS ?
				       BPF_TC_INGRESS : BPF_TC_EGRESS;
		err = bpf_tc_hook_create(&tc_hook);
		if (err == 0)
			hook_created = true;
		err = err == -EEXIST ? 0 : err;
		if (!ASSERT_OK(err, "bpf_tc_hook_create"))
			goto cleanup;

		tc_opts.prog_fd = fd3;
		err = bpf_tc_attach(&tc_hook, &tc_opts);
		if (!ASSERT_OK(err, "bpf_tc_attach"))
			goto cleanup;
		tc_attached = true;
	}

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_detach;

	assert_mprog_count(target, 2);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	if (!ASSERT_OK(err, "prog_detach"))
		goto cleanup_detach;

	assert_mprog_count(target, 1);

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");

cleanup_detach:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	if (!ASSERT_OK(err, "prog_detach"))
		goto cleanup;

	assert_mprog_count(target, 0);
cleanup:
	if (tc_attached) {
		tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
		err = bpf_tc_detach(&tc_hook, &tc_opts);
		ASSERT_OK(err, "bpf_tc_detach");
	}
	if (hook_created) {
		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
		bpf_tc_hook_destroy(&tc_hook);
	}
	test_tc_link__destroy(skel);
	assert_mprog_count(target, 0);
}

void serial_test_tc_opts_chain_classic(void)
{
	test_tc_chain_classic(BPF_TCX_INGRESS, false);
	test_tc_chain_classic(BPF_TCX_EGRESS, false);
	test_tc_chain_classic(BPF_TCX_INGRESS, true);
	test_tc_chain_classic(BPF_TCX_EGRESS, true);
}

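/* Covers BPF_F_REPLACE: replaces tc2 with tc3 and back again (also in
 * combination with BPF_F_BEFORE), and checks that invalid replace
 * combinations are rejected with -EEXIST or -ERANGE.
 */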
static void test_tc_opts_replace_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, id1, id2, id3, detach_fd;
	__u32 prog_ids[4], prog_flags[4];
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
		.expected_revision = 2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	detach_fd = fd2;

	assert_mprog_count(target, 2);

	optq.prog_attach_flags = prog_flags;
	optq.prog_ids = prog_ids;

	memset(prog_flags, 0, sizeof(prog_flags));
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd2,
		.expected_revision = 3,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	detach_fd = fd3;

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 4, "revision");
	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE | BPF_F_BEFORE,
		.replace_prog_fd = fd3,
		.relative_fd = fd1,
		.expected_revision = 4,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	detach_fd = fd2;

	assert_mprog_count(target, 2);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE | BPF_F_AFTER,
		.replace_prog_fd = fd2,
		.relative_fd = fd1,
		.expected_revision = 5,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER | BPF_F_REPLACE,
		.replace_prog_fd = fd2,
		.relative_fd = fd1,
		.expected_revision = 5,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
		.relative_id = id1,
		.expected_revision = 5,
	);

cleanup_target2:
	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	LIBBPF_OPTS_RESET(optd);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_replace(void)
{
	test_tc_opts_replace_target(BPF_TCX_INGRESS);
	test_tc_opts_replace_target(BPF_TCX_EGRESS);
}

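/* Probes invalid attach flag/relative_fd/relative_id combinations and
 * checks that they fail with the expected errno (-ERANGE, -ENOENT,
 * -EINVAL or -EEXIST) without altering the attached program count.
 */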
static void test_tc_opts_invalid_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	__u32 fd1, fd2, id1, id2;
	struct test_tc_link *skel;
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");

	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ERANGE, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_AFTER,
		.relative_fd = fd2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ID,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(target, 0);

	LIBBPF_OPTS_RESET(opta);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach_x1");
	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");
	assert_mprog_count(target, 1);

	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);
cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_invalid(void)
{
	test_tc_opts_invalid_target(BPF_TCX_INGRESS);
	test_tc_opts_invalid_target(BPF_TCX_EGRESS);
}

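/* Uses BPF_F_BEFORE without a relative program to prepend to the head
 * of the chain, expecting the final order tc4, tc3, tc2, tc1.
 */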
static void test_tc_opts_prepend_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id2, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_prepend(void)
{
	test_tc_opts_prepend_target(BPF_TCX_INGRESS);
	test_tc_opts_prepend_target(BPF_TCX_EGRESS);
}

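/* Uses BPF_F_AFTER without a relative program to append to the tail
 * of the chain, expecting the final order tc1, tc2, tc3, tc4.
 */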
static void test_tc_opts_append_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	__u32 prog_ids[5];
	int err;

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target;

	assert_mprog_count(target, 2);

	optq.prog_ids = prog_ids;

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target2;

	ASSERT_EQ(optq.count, 2, "count");
	ASSERT_EQ(optq.revision, 3, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target2;

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup_target3;

	assert_mprog_count(target, 4);

	memset(prog_ids, 0, sizeof(prog_ids));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup_target4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");

	tc_skel_reset_all_seen(skel);
	ASSERT_OK(system(ping_cmd), ping_cmd);

	ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
	ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
	ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");

cleanup_target4:
	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 3);

cleanup_target3:
	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup_target2:
	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 1);

cleanup_target:
	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 0);

cleanup:
	test_tc_link__destroy(skel);
}

void serial_test_tc_opts_append(void)
{
	test_tc_opts_append_target(BPF_TCX_INGRESS);
	test_tc_opts_append_target(BPF_TCX_EGRESS);
}

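/* Attaches all four programs to a temporary veth device and then
 * deletes the device, relying on the kernel to clean up the tcx
 * attachments together with the netdev.
 */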
static void test_tc_opts_dev_cleanup_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct test_tc_link *skel;
	int err, ifindex;

	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
	ifindex = if_nametoindex("tcx_opts1");
	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");

	skel = test_tc_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.tc1);
	fd2 = bpf_program__fd(skel->progs.tc2);
	fd3 = bpf_program__fd(skel->progs.tc3);
	fd4 = bpf_program__fd(skel->progs.tc4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
	ASSERT_NEQ(id2, id3, "prog_ids_2_3");

	assert_mprog_count_ifindex(ifindex, target, 0);

	err = bpf_prog_attach_opts(fd1, ifindex, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count_ifindex(ifindex, target, 1);

	err = bpf_prog_attach_opts(fd2, ifindex, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count_ifindex(ifindex, target, 2);

	err = bpf_prog_attach_opts(fd3, ifindex, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count_ifindex(ifindex, target, 3);

	err = bpf_prog_attach_opts(fd4, ifindex, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count_ifindex(ifindex, target, 4);

	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
	return;
cleanup3:
	err = bpf_prog_detach_opts(fd3, ifindex, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count_ifindex(ifindex, target, 2);
cleanup2:
	err = bpf_prog_detach_opts(fd2, ifindex, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count_ifindex(ifindex, target, 1);
cleanup1:
	err = bpf_prog_detach_opts(fd1, ifindex, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count_ifindex(ifindex, target, 0);
cleanup:
	test_tc_link__destroy(skel);

	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
}

void serial_test_tc_opts_dev_cleanup(void)
{
	test_tc_opts_dev_cleanup_target(BPF_TCX_INGRESS);
	test_tc_opts_dev_cleanup_target(BPF_TCX_EGRESS);
}

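/* Mixes opts-based (bpf_prog_attach_opts) and link-based
 * (bpf_program__attach_tcx) attachments on the same hook, checking
 * the BPF_F_REPLACE interactions between the two and the combined
 * query output (prog IDs, link IDs, attach flags).
 */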
static void test_tc_opts_mixed_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	LIBBPF_OPTS(bpf_tcx_opts, optl);
	__u32 pid1, pid2, pid3, pid4, lid2, lid4;
	__u32 prog_flags[4], link_flags[4];
	__u32 prog_ids[4], link_ids[4];
	struct test_tc_link *skel;
	struct bpf_link *link;
	int err, detach_fd;

	skel = test_tc_link__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
		  0, "tc1_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
		  0, "tc2_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, target),
		  0, "tc3_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc4, target),
		  0, "tc4_attach_type");

	err = test_tc_link__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
	pid3 = id_from_prog_fd(bpf_program__fd(skel->progs.tc3));
	pid4 = id_from_prog_fd(bpf_program__fd(skel->progs.tc4));

	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");
	ASSERT_NEQ(pid3, pid4, "prog_ids_3_4");
	ASSERT_NEQ(pid2, pid3, "prog_ids_2_3");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	detach_fd = bpf_program__fd(skel->progs.tc1);

	assert_mprog_count(target, 1);

	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
	if (!ASSERT_OK_PTR(link, "link_attach"))
		goto cleanup1;
	skel->links.tc2 = link;

	lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2));

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
	);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
	);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc2),
	);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EBUSY, "prog_attach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc1),
	);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc3),
				   loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	detach_fd = bpf_program__fd(skel->progs.tc3);

	assert_mprog_count(target, 2);

	link = bpf_program__attach_tcx(skel->progs.tc4, loopback, &optl);
	if (!ASSERT_OK_PTR(link, "link_attach"))
		goto cleanup1;
	skel->links.tc4 = link;

	lid4 = id_from_link_fd(bpf_link__fd(skel->links.tc4));

	assert_mprog_count(target, 3);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_REPLACE,
		.replace_prog_fd = bpf_program__fd(skel->progs.tc4),
	);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc2),
				   loopback, target, &opta);
	ASSERT_EQ(err, -EEXIST, "prog_attach");

	optq.prog_ids = prog_ids;
	optq.prog_attach_flags = prog_flags;
	optq.link_ids = link_ids;
	optq.link_attach_flags = link_flags;

	memset(prog_ids, 0, sizeof(prog_ids));
	memset(prog_flags, 0, sizeof(prog_flags));
	memset(link_ids, 0, sizeof(link_ids));
	memset(link_flags, 0, sizeof(link_flags));
	optq.count = ARRAY_SIZE(prog_ids);

	err = bpf_prog_query_opts(loopback, target, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup1;

	ASSERT_EQ(optq.count, 3, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], pid3, "prog_ids[0]");
	ASSERT_EQ(optq.prog_attach_flags[0], 0, "prog_flags[0]");
	ASSERT_EQ(optq.link_ids[0], 0, "link_ids[0]");
	ASSERT_EQ(optq.link_attach_flags[0], 0, "link_flags[0]");
	ASSERT_EQ(optq.prog_ids[1], pid2, "prog_ids[1]");
	ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
	ASSERT_EQ(optq.link_ids[1], lid2, "link_ids[1]");
	ASSERT_EQ(optq.link_attach_flags[1], 0, "link_flags[1]");
	ASSERT_EQ(optq.prog_ids[2], pid4, "prog_ids[2]");
	ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
	ASSERT_EQ(optq.link_ids[2], lid4, "link_ids[2]");
	ASSERT_EQ(optq.link_attach_flags[2], 0, "link_flags[2]");
	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
	ASSERT_EQ(optq.prog_attach_flags[3], 0, "prog_flags[3]");
	ASSERT_EQ(optq.link_ids[3], 0, "link_ids[3]");
	ASSERT_EQ(optq.link_attach_flags[3], 0, "link_flags[3]");

	ASSERT_OK(system(ping_cmd), ping_cmd);

cleanup1:
	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup:
	test_tc_link__destroy(skel);
	assert_mprog_count(target, 0);
}

void serial_test_tc_opts_mixed(void)
{
	test_tc_opts_mixed_target(BPF_TCX_INGRESS);
	test_tc_opts_mixed_target(BPF_TCX_EGRESS);
}

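/* With one opts-based and one link-based attachment in place, checks
 * that BPF_F_AFTER detach of the link-owned tail fails with -EBUSY
 * while BPF_F_BEFORE detach of the opts-attached head succeeds.
 */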
static void test_tc_opts_demixed_target(int target)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_tcx_opts, optl);
	struct test_tc_link *skel;
	struct bpf_link *link;
	__u32 pid1, pid2;
	int err;

	skel = test_tc_link__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		goto cleanup;

	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, target),
		  0, "tc1_attach_type");
	ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, target),
		  0, "tc2_attach_type");

	err = test_tc_link__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1));
	pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2));
	ASSERT_NEQ(pid1, pid2, "prog_ids_1_2");

	assert_mprog_count(target, 0);

	err = bpf_prog_attach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(target, 1);

	link = bpf_program__attach_tcx(skel->progs.tc2, loopback, &optl);
	if (!ASSERT_OK_PTR(link, "link_attach"))
		goto cleanup1;
	skel->links.tc2 = link;

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_AFTER,
	);

	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_EQ(err, -EBUSY, "prog_detach");

	assert_mprog_count(target, 2);

	LIBBPF_OPTS_RESET(optd,
		.flags = BPF_F_BEFORE,
	);

	err = bpf_prog_detach_opts(0, loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");

	assert_mprog_count(target, 1);
	goto cleanup;

cleanup1:
	err = bpf_prog_detach_opts(bpf_program__fd(skel->progs.tc1),
				   loopback, target, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(target, 2);

cleanup:
	test_tc_link__destroy(skel);
	assert_mprog_count(target, 0);
}

void serial_test_tc_opts_demixed(void)
{
	test_tc_opts_demixed_target(BPF_TCX_INGRESS);
	test_tc_opts_demixed_target(BPF_TCX_EGRESS);
}

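/* Attaches tc1-tc4 and detaches via BPF_F_BEFORE/BPF_F_AFTER without
 * specifying a program, which removes the current head/tail of the
 * chain; also checks -ENOENT once the chain is empty.
 */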
test_tc_opts_detach_target(int target)1651 static void test_tc_opts_detach_target(int target)
1652 {
1653 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
1654 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
1655 	LIBBPF_OPTS(bpf_prog_query_opts, optq);
1656 	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
1657 	struct test_tc_link *skel;
1658 	__u32 prog_ids[5];
1659 	int err;
1660 
1661 	skel = test_tc_link__open_and_load();
1662 	if (!ASSERT_OK_PTR(skel, "skel_load"))
1663 		goto cleanup;
1664 
1665 	fd1 = bpf_program__fd(skel->progs.tc1);
1666 	fd2 = bpf_program__fd(skel->progs.tc2);
1667 	fd3 = bpf_program__fd(skel->progs.tc3);
1668 	fd4 = bpf_program__fd(skel->progs.tc4);
1669 
1670 	id1 = id_from_prog_fd(fd1);
1671 	id2 = id_from_prog_fd(fd2);
1672 	id3 = id_from_prog_fd(fd3);
1673 	id4 = id_from_prog_fd(fd4);
1674 
1675 	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
1676 	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
1677 	ASSERT_NEQ(id2, id3, "prog_ids_2_3");
1678 
1679 	assert_mprog_count(target, 0);
1680 
1681 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
1682 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1683 		goto cleanup;
1684 
1685 	assert_mprog_count(target, 1);
1686 
1687 	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
1688 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1689 		goto cleanup1;
1690 
1691 	assert_mprog_count(target, 2);
1692 
1693 	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
1694 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1695 		goto cleanup2;
1696 
1697 	assert_mprog_count(target, 3);
1698 
1699 	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
1700 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1701 		goto cleanup3;
1702 
1703 	assert_mprog_count(target, 4);
1704 
1705 	optq.prog_ids = prog_ids;
1706 
1707 	memset(prog_ids, 0, sizeof(prog_ids));
1708 	optq.count = ARRAY_SIZE(prog_ids);
1709 
1710 	err = bpf_prog_query_opts(loopback, target, &optq);
1711 	if (!ASSERT_OK(err, "prog_query"))
1712 		goto cleanup4;
1713 
1714 	ASSERT_EQ(optq.count, 4, "count");
1715 	ASSERT_EQ(optq.revision, 5, "revision");
1716 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
1717 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
1718 	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
1719 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
1720 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
1721 
1722 	LIBBPF_OPTS_RESET(optd,
1723 		.flags = BPF_F_BEFORE,
1724 	);
1725 
1726 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1727 	ASSERT_OK(err, "prog_detach");
1728 
1729 	assert_mprog_count(target, 3);
1730 
1731 	memset(prog_ids, 0, sizeof(prog_ids));
1732 	optq.count = ARRAY_SIZE(prog_ids);
1733 
1734 	err = bpf_prog_query_opts(loopback, target, &optq);
1735 	if (!ASSERT_OK(err, "prog_query"))
1736 		goto cleanup4;
1737 
1738 	ASSERT_EQ(optq.count, 3, "count");
1739 	ASSERT_EQ(optq.revision, 6, "revision");
1740 	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
1741 	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
1742 	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
1743 	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
1744 
1745 	LIBBPF_OPTS_RESET(optd,
1746 		.flags = BPF_F_AFTER,
1747 	);
1748 
1749 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1750 	ASSERT_OK(err, "prog_detach");
1751 
1752 	assert_mprog_count(target, 2);
1753 
1754 	memset(prog_ids, 0, sizeof(prog_ids));
1755 	optq.count = ARRAY_SIZE(prog_ids);
1756 
1757 	err = bpf_prog_query_opts(loopback, target, &optq);
1758 	if (!ASSERT_OK(err, "prog_query"))
1759 		goto cleanup4;
1760 
1761 	ASSERT_EQ(optq.count, 2, "count");
1762 	ASSERT_EQ(optq.revision, 7, "revision");
1763 	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
1764 	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
1765 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
1766 
1767 	LIBBPF_OPTS_RESET(optd);
1768 
1769 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
1770 	ASSERT_OK(err, "prog_detach");
1771 	assert_mprog_count(target, 1);
1772 
1773 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1774 	ASSERT_OK(err, "prog_detach");
1775 	assert_mprog_count(target, 0);
1776 
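	/* With the chain now empty, relative detach requests are expected
	 * to fail with -ENOENT.
	 */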
1777 	LIBBPF_OPTS_RESET(optd,
1778 		.flags = BPF_F_BEFORE,
1779 	);
1780 
1781 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1782 	ASSERT_EQ(err, -ENOENT, "prog_detach");
1783 
1784 	LIBBPF_OPTS_RESET(optd,
1785 		.flags = BPF_F_AFTER,
1786 	);
1787 
1788 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1789 	ASSERT_EQ(err, -ENOENT, "prog_detach");
1790 	goto cleanup;
1791 
1792 cleanup4:
1793 	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
1794 	ASSERT_OK(err, "prog_detach");
1795 	assert_mprog_count(target, 3);
1796 
1797 cleanup3:
1798 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
1799 	ASSERT_OK(err, "prog_detach");
1800 	assert_mprog_count(target, 2);
1801 
1802 cleanup2:
1803 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1804 	ASSERT_OK(err, "prog_detach");
1805 	assert_mprog_count(target, 1);
1806 
1807 cleanup1:
1808 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
1809 	ASSERT_OK(err, "prog_detach");
1810 	assert_mprog_count(target, 0);
1811 
1812 cleanup:
1813 	test_tc_link__destroy(skel);
1814 }
1815 
1816 void serial_test_tc_opts_detach(void)
1817 {
1818 	test_tc_opts_detach_target(BPF_TCX_INGRESS);
1819 	test_tc_opts_detach_target(BPF_TCX_EGRESS);
1820 }
1821 
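/* Detach relative to an anchor program via BPF_F_BEFORE/.relative_fd:
 * the test expects success only when the detached program sits directly
 * before the anchor, -ENOENT when a referenced program is no longer
 * attached, and -ERANGE when the relative position does not match.
 */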
1822 static void test_tc_opts_detach_before_target(int target)
1823 {
1824 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
1825 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
1826 	LIBBPF_OPTS(bpf_prog_query_opts, optq);
1827 	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
1828 	struct test_tc_link *skel;
1829 	__u32 prog_ids[5];
1830 	int err;
1831 
1832 	skel = test_tc_link__open_and_load();
1833 	if (!ASSERT_OK_PTR(skel, "skel_load"))
1834 		goto cleanup;
1835 
1836 	fd1 = bpf_program__fd(skel->progs.tc1);
1837 	fd2 = bpf_program__fd(skel->progs.tc2);
1838 	fd3 = bpf_program__fd(skel->progs.tc3);
1839 	fd4 = bpf_program__fd(skel->progs.tc4);
1840 
1841 	id1 = id_from_prog_fd(fd1);
1842 	id2 = id_from_prog_fd(fd2);
1843 	id3 = id_from_prog_fd(fd3);
1844 	id4 = id_from_prog_fd(fd4);
1845 
1846 	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
1847 	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
1848 	ASSERT_NEQ(id2, id3, "prog_ids_2_3");
1849 
1850 	assert_mprog_count(target, 0);
1851 
1852 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
1853 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1854 		goto cleanup;
1855 
1856 	assert_mprog_count(target, 1);
1857 
1858 	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
1859 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1860 		goto cleanup1;
1861 
1862 	assert_mprog_count(target, 2);
1863 
1864 	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
1865 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1866 		goto cleanup2;
1867 
1868 	assert_mprog_count(target, 3);
1869 
1870 	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
1871 	if (!ASSERT_EQ(err, 0, "prog_attach"))
1872 		goto cleanup3;
1873 
1874 	assert_mprog_count(target, 4);
1875 
1876 	optq.prog_ids = prog_ids;
1877 
1878 	memset(prog_ids, 0, sizeof(prog_ids));
1879 	optq.count = ARRAY_SIZE(prog_ids);
1880 
1881 	err = bpf_prog_query_opts(loopback, target, &optq);
1882 	if (!ASSERT_OK(err, "prog_query"))
1883 		goto cleanup4;
1884 
1885 	ASSERT_EQ(optq.count, 4, "count");
1886 	ASSERT_EQ(optq.revision, 5, "revision");
1887 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
1888 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
1889 	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
1890 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
1891 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
1892 
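	/* fd1 sits directly before fd2, so this relative detach is expected
	 * to succeed and leave id2/id3/id4 behind.
	 */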
1893 	LIBBPF_OPTS_RESET(optd,
1894 		.flags = BPF_F_BEFORE,
1895 		.relative_fd = fd2,
1896 	);
1897 
1898 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
1899 	ASSERT_OK(err, "prog_detach");
1900 
1901 	assert_mprog_count(target, 3);
1902 
1903 	memset(prog_ids, 0, sizeof(prog_ids));
1904 	optq.count = ARRAY_SIZE(prog_ids);
1905 
1906 	err = bpf_prog_query_opts(loopback, target, &optq);
1907 	if (!ASSERT_OK(err, "prog_query"))
1908 		goto cleanup4;
1909 
1910 	ASSERT_EQ(optq.count, 3, "count");
1911 	ASSERT_EQ(optq.revision, 6, "revision");
1912 	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
1913 	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
1914 	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
1915 	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
1916 
1917 	LIBBPF_OPTS_RESET(optd,
1918 		.flags = BPF_F_BEFORE,
1919 		.relative_fd = fd2,
1920 	);
1921 
1922 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
1923 	ASSERT_EQ(err, -ENOENT, "prog_detach");
1924 	assert_mprog_count(target, 3);
1925 
1926 	LIBBPF_OPTS_RESET(optd,
1927 		.flags = BPF_F_BEFORE,
1928 		.relative_fd = fd4,
1929 	);
1930 
1931 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1932 	ASSERT_EQ(err, -ERANGE, "prog_detach");
1933 	assert_mprog_count(target, 3);
1934 
1935 	LIBBPF_OPTS_RESET(optd,
1936 		.flags = BPF_F_BEFORE,
1937 		.relative_fd = fd1,
1938 	);
1939 
1940 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1941 	ASSERT_EQ(err, -ENOENT, "prog_detach");
1942 	assert_mprog_count(target, 3);
1943 
1944 	LIBBPF_OPTS_RESET(optd,
1945 		.flags = BPF_F_BEFORE,
1946 		.relative_fd = fd3,
1947 	);
1948 
1949 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
1950 	ASSERT_OK(err, "prog_detach");
1951 
1952 	assert_mprog_count(target, 2);
1953 
1954 	memset(prog_ids, 0, sizeof(prog_ids));
1955 	optq.count = ARRAY_SIZE(prog_ids);
1956 
1957 	err = bpf_prog_query_opts(loopback, target, &optq);
1958 	if (!ASSERT_OK(err, "prog_query"))
1959 		goto cleanup4;
1960 
1961 	ASSERT_EQ(optq.count, 2, "count");
1962 	ASSERT_EQ(optq.revision, 7, "revision");
1963 	ASSERT_EQ(optq.prog_ids[0], id3, "prog_ids[0]");
1964 	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
1965 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
1966 
1967 	LIBBPF_OPTS_RESET(optd,
1968 		.flags = BPF_F_BEFORE,
1969 		.relative_fd = fd4,
1970 	);
1971 
1972 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1973 	ASSERT_OK(err, "prog_detach");
1974 
1975 	assert_mprog_count(target, 1);
1976 
1977 	memset(prog_ids, 0, sizeof(prog_ids));
1978 	optq.count = ARRAY_SIZE(prog_ids);
1979 
1980 	err = bpf_prog_query_opts(loopback, target, &optq);
1981 	if (!ASSERT_OK(err, "prog_query"))
1982 		goto cleanup4;
1983 
1984 	ASSERT_EQ(optq.count, 1, "count");
1985 	ASSERT_EQ(optq.revision, 8, "revision");
1986 	ASSERT_EQ(optq.prog_ids[0], id4, "prog_ids[0]");
1987 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
1988 
1989 	LIBBPF_OPTS_RESET(optd,
1990 		.flags = BPF_F_BEFORE,
1991 	);
1992 
1993 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
1994 	ASSERT_OK(err, "prog_detach");
1995 
1996 	assert_mprog_count(target, 0);
1997 	goto cleanup;
1998 
1999 cleanup4:
2000 	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
2001 	ASSERT_OK(err, "prog_detach");
2002 	assert_mprog_count(target, 3);
2003 
2004 cleanup3:
2005 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
2006 	ASSERT_OK(err, "prog_detach");
2007 	assert_mprog_count(target, 2);
2008 
2009 cleanup2:
2010 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
2011 	ASSERT_OK(err, "prog_detach");
2012 	assert_mprog_count(target, 1);
2013 
2014 cleanup1:
2015 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2016 	ASSERT_OK(err, "prog_detach");
2017 	assert_mprog_count(target, 0);
2018 
2019 cleanup:
2020 	test_tc_link__destroy(skel);
2021 }
2022 
2023 void serial_test_tc_opts_detach_before(void)
2024 {
2025 	test_tc_opts_detach_before_target(BPF_TCX_INGRESS);
2026 	test_tc_opts_detach_before_target(BPF_TCX_EGRESS);
2027 }
2028 
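/* Mirror of the BPF_F_BEFORE test above: detach relative to an anchor
 * via BPF_F_AFTER/.relative_fd. The test expects success only when the
 * detached program sits directly after the anchor, -ENOENT when a
 * referenced program is gone, and -ERANGE for position mismatches.
 */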
2029 static void test_tc_opts_detach_after_target(int target)
2030 {
2031 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2032 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2033 	LIBBPF_OPTS(bpf_prog_query_opts, optq);
2034 	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
2035 	struct test_tc_link *skel;
2036 	__u32 prog_ids[5];
2037 	int err;
2038 
2039 	skel = test_tc_link__open_and_load();
2040 	if (!ASSERT_OK_PTR(skel, "skel_load"))
2041 		goto cleanup;
2042 
2043 	fd1 = bpf_program__fd(skel->progs.tc1);
2044 	fd2 = bpf_program__fd(skel->progs.tc2);
2045 	fd3 = bpf_program__fd(skel->progs.tc3);
2046 	fd4 = bpf_program__fd(skel->progs.tc4);
2047 
2048 	id1 = id_from_prog_fd(fd1);
2049 	id2 = id_from_prog_fd(fd2);
2050 	id3 = id_from_prog_fd(fd3);
2051 	id4 = id_from_prog_fd(fd4);
2052 
2053 	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
2054 	ASSERT_NEQ(id3, id4, "prog_ids_3_4");
2055 	ASSERT_NEQ(id2, id3, "prog_ids_2_3");
2056 
2057 	assert_mprog_count(target, 0);
2058 
2059 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
2060 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2061 		goto cleanup;
2062 
2063 	assert_mprog_count(target, 1);
2064 
2065 	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
2066 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2067 		goto cleanup1;
2068 
2069 	assert_mprog_count(target, 2);
2070 
2071 	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
2072 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2073 		goto cleanup2;
2074 
2075 	assert_mprog_count(target, 3);
2076 
2077 	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
2078 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2079 		goto cleanup3;
2080 
2081 	assert_mprog_count(target, 4);
2082 
2083 	optq.prog_ids = prog_ids;
2084 
2085 	memset(prog_ids, 0, sizeof(prog_ids));
2086 	optq.count = ARRAY_SIZE(prog_ids);
2087 
2088 	err = bpf_prog_query_opts(loopback, target, &optq);
2089 	if (!ASSERT_OK(err, "prog_query"))
2090 		goto cleanup4;
2091 
2092 	ASSERT_EQ(optq.count, 4, "count");
2093 	ASSERT_EQ(optq.revision, 5, "revision");
2094 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2095 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
2096 	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
2097 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
2098 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
2099 
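	/* fd2 sits directly after fd1, so this relative detach is expected
	 * to succeed and leave id1/id3/id4 behind.
	 */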
2100 	LIBBPF_OPTS_RESET(optd,
2101 		.flags = BPF_F_AFTER,
2102 		.relative_fd = fd1,
2103 	);
2104 
2105 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
2106 	ASSERT_OK(err, "prog_detach");
2107 
2108 	assert_mprog_count(target, 3);
2109 
2110 	memset(prog_ids, 0, sizeof(prog_ids));
2111 	optq.count = ARRAY_SIZE(prog_ids);
2112 
2113 	err = bpf_prog_query_opts(loopback, target, &optq);
2114 	if (!ASSERT_OK(err, "prog_query"))
2115 		goto cleanup4;
2116 
2117 	ASSERT_EQ(optq.count, 3, "count");
2118 	ASSERT_EQ(optq.revision, 6, "revision");
2119 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2120 	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
2121 	ASSERT_EQ(optq.prog_ids[2], id4, "prog_ids[2]");
2122 	ASSERT_EQ(optq.prog_ids[3], 0, "prog_ids[3]");
2123 
2124 	LIBBPF_OPTS_RESET(optd,
2125 		.flags = BPF_F_AFTER,
2126 		.relative_fd = fd1,
2127 	);
2128 
2129 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
2130 	ASSERT_EQ(err, -ENOENT, "prog_detach");
2131 	assert_mprog_count(target, 3);
2132 
2133 	LIBBPF_OPTS_RESET(optd,
2134 		.flags = BPF_F_AFTER,
2135 		.relative_fd = fd4,
2136 	);
2137 
2138 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2139 	ASSERT_EQ(err, -ERANGE, "prog_detach");
2140 	assert_mprog_count(target, 3);
2141 
2142 	LIBBPF_OPTS_RESET(optd,
2143 		.flags = BPF_F_AFTER,
2144 		.relative_fd = fd3,
2145 	);
2146 
2147 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2148 	ASSERT_EQ(err, -ERANGE, "prog_detach");
2149 	assert_mprog_count(target, 3);
2150 
2151 	LIBBPF_OPTS_RESET(optd,
2152 		.flags = BPF_F_AFTER,
2153 		.relative_fd = fd1,
2154 	);
2155 
2156 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2157 	ASSERT_EQ(err, -ERANGE, "prog_detach");
2158 	assert_mprog_count(target, 3);
2159 
2160 	LIBBPF_OPTS_RESET(optd,
2161 		.flags = BPF_F_AFTER,
2162 		.relative_fd = fd1,
2163 	);
2164 
2165 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
2166 	ASSERT_OK(err, "prog_detach");
2167 
2168 	assert_mprog_count(target, 2);
2169 
2170 	memset(prog_ids, 0, sizeof(prog_ids));
2171 	optq.count = ARRAY_SIZE(prog_ids);
2172 
2173 	err = bpf_prog_query_opts(loopback, target, &optq);
2174 	if (!ASSERT_OK(err, "prog_query"))
2175 		goto cleanup4;
2176 
2177 	ASSERT_EQ(optq.count, 2, "count");
2178 	ASSERT_EQ(optq.revision, 7, "revision");
2179 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2180 	ASSERT_EQ(optq.prog_ids[1], id4, "prog_ids[1]");
2181 	ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
2182 
2183 	LIBBPF_OPTS_RESET(optd,
2184 		.flags = BPF_F_AFTER,
2185 		.relative_fd = fd1,
2186 	);
2187 
2188 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
2189 	ASSERT_OK(err, "prog_detach");
2190 
2191 	assert_mprog_count(target, 1);
2192 
2193 	memset(prog_ids, 0, sizeof(prog_ids));
2194 	optq.count = ARRAY_SIZE(prog_ids);
2195 
2196 	err = bpf_prog_query_opts(loopback, target, &optq);
2197 	if (!ASSERT_OK(err, "prog_query"))
2198 		goto cleanup4;
2199 
2200 	ASSERT_EQ(optq.count, 1, "count");
2201 	ASSERT_EQ(optq.revision, 8, "revision");
2202 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2203 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
2204 
2205 	LIBBPF_OPTS_RESET(optd,
2206 		.flags = BPF_F_AFTER,
2207 	);
2208 
2209 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
2210 	ASSERT_OK(err, "prog_detach");
2211 
2212 	assert_mprog_count(target, 0);
2213 	goto cleanup;
2214 
2215 cleanup4:
2216 	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
2217 	ASSERT_OK(err, "prog_detach");
2218 	assert_mprog_count(target, 3);
2219 
2220 cleanup3:
2221 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
2222 	ASSERT_OK(err, "prog_detach");
2223 	assert_mprog_count(target, 2);
2224 
2225 cleanup2:
2226 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
2227 	ASSERT_OK(err, "prog_detach");
2228 	assert_mprog_count(target, 1);
2229 
2230 cleanup1:
2231 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2232 	ASSERT_OK(err, "prog_detach");
2233 	assert_mprog_count(target, 0);
2234 
2235 cleanup:
2236 	test_tc_link__destroy(skel);
2237 }
2238 
2239 void serial_test_tc_opts_detach_after(void)
2240 {
2241 	test_tc_opts_detach_after_target(BPF_TCX_INGRESS);
2242 	test_tc_opts_detach_after_target(BPF_TCX_EGRESS);
2243 }
2244 
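/* Detaching from an empty tcx chain is expected to fail with -ENOENT,
 * regardless of whether a legacy tc hook has been created on the device
 * beforehand (chain_tc_old).
 */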
2245 static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
2246 {
2247 	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
2248 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2249 	int err;
2250 
2251 	assert_mprog_count(target, 0);
2252 	if (chain_tc_old) {
2253 		tc_hook.attach_point = target == BPF_TCX_INGRESS ?
2254 				       BPF_TC_INGRESS : BPF_TC_EGRESS;
2255 		err = bpf_tc_hook_create(&tc_hook);
2256 		ASSERT_OK(err, "bpf_tc_hook_create");
2257 		assert_mprog_count(target, 0);
2258 	}
2259 	err = bpf_prog_detach_opts(0, loopback, target, &optd);
2260 	ASSERT_EQ(err, -ENOENT, "prog_detach");
2261 	if (chain_tc_old) {
2262 		tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
2263 		bpf_tc_hook_destroy(&tc_hook);
2264 	}
2265 	assert_mprog_count(target, 0);
2266 }
2267 
2268 void serial_test_tc_opts_delete_empty(void)
2269 {
2270 	test_tc_opts_delete_empty(BPF_TCX_INGRESS, false);
2271 	test_tc_opts_delete_empty(BPF_TCX_EGRESS, false);
2272 	test_tc_opts_delete_empty(BPF_TCX_INGRESS, true);
2273 	test_tc_opts_delete_empty(BPF_TCX_EGRESS, true);
2274 }
2275 
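/* Mix a legacy cls_bpf filter (tc5, attached via bpf_tc_attach) with a
 * tcx program (tc6, later replaced by tc4 via BPF_F_REPLACE) on the same
 * direction and check which programs see traffic in each combination.
 */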
2276 static void test_tc_chain_mixed(int target)
2277 {
2278 	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
2279 	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
2280 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2281 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2282 	__u32 fd1, fd2, fd3, id1, id2, id3;
2283 	struct test_tc_link *skel;
2284 	int err, detach_fd;
2285 
2286 	skel = test_tc_link__open_and_load();
2287 	if (!ASSERT_OK_PTR(skel, "skel_load"))
2288 		goto cleanup;
2289 
2290 	fd1 = bpf_program__fd(skel->progs.tc4);
2291 	fd2 = bpf_program__fd(skel->progs.tc5);
2292 	fd3 = bpf_program__fd(skel->progs.tc6);
2293 
2294 	id1 = id_from_prog_fd(fd1);
2295 	id2 = id_from_prog_fd(fd2);
2296 	id3 = id_from_prog_fd(fd3);
2297 
2298 	ASSERT_NEQ(id1, id2, "prog_ids_1_2");
2299 	ASSERT_NEQ(id2, id3, "prog_ids_2_3");
2300 
2301 	assert_mprog_count(target, 0);
2302 
2303 	tc_hook.attach_point = target == BPF_TCX_INGRESS ?
2304 			       BPF_TC_INGRESS : BPF_TC_EGRESS;
2305 	err = bpf_tc_hook_create(&tc_hook);
2306 	err = err == -EEXIST ? 0 : err;
2307 	if (!ASSERT_OK(err, "bpf_tc_hook_create"))
2308 		goto cleanup;
2309 
2310 	tc_opts.prog_fd = fd2;
2311 	err = bpf_tc_attach(&tc_hook, &tc_opts);
2312 	if (!ASSERT_OK(err, "bpf_tc_attach"))
2313 		goto cleanup_hook;
2314 
2315 	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
2316 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2317 		goto cleanup_filter;
2318 
2319 	detach_fd = fd3;
2320 
2321 	assert_mprog_count(target, 1);
2322 
2323 	tc_skel_reset_all_seen(skel);
2324 	ASSERT_OK(system(ping_cmd), ping_cmd);
2325 
2326 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
2327 	ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
2328 	ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
2329 
2330 	LIBBPF_OPTS_RESET(opta,
2331 		.flags = BPF_F_REPLACE,
2332 		.replace_prog_fd = fd3,
2333 	);
2334 
2335 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
2336 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2337 		goto cleanup_opts;
2338 
2339 	detach_fd = fd1;
2340 
2341 	assert_mprog_count(target, 1);
2342 
2343 	tc_skel_reset_all_seen(skel);
2344 	ASSERT_OK(system(ping_cmd), ping_cmd);
2345 
2346 	ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
2347 	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
2348 	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
2349 
2350 cleanup_opts:
2351 	err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
2352 	ASSERT_OK(err, "prog_detach");
2353 	assert_mprog_count(target, 0);
2354 
2355 	tc_skel_reset_all_seen(skel);
2356 	ASSERT_OK(system(ping_cmd), ping_cmd);
2357 
2358 	ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
2359 	ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
2360 	ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
2361 
2362 cleanup_filter:
2363 	tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
2364 	err = bpf_tc_detach(&tc_hook, &tc_opts);
2365 	ASSERT_OK(err, "bpf_tc_detach");
2366 
2367 cleanup_hook:
2368 	tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
2369 	bpf_tc_hook_destroy(&tc_hook);
2370 
2371 cleanup:
2372 	test_tc_link__destroy(skel);
2373 }
2374 
2375 void serial_test_tc_opts_chain_mixed(void)
2376 {
2377 	test_tc_chain_mixed(BPF_TCX_INGRESS);
2378 	test_tc_chain_mixed(BPF_TCX_EGRESS);
2379 }
2380 
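/* Load a minimal BPF_PROG_TYPE_SCHED_CLS program (r0 = 0; exit) used to
 * fill up the attachment chain in the max test below.
 */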
2381 static int generate_dummy_prog(void)
2382 {
2383 	const struct bpf_insn prog_insns[] = {
2384 		BPF_MOV64_IMM(BPF_REG_0, 0),
2385 		BPF_EXIT_INSN(),
2386 	};
2387 	const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
2388 	LIBBPF_OPTS(bpf_prog_load_opts, opts);
2389 	const size_t log_buf_sz = 256;
2390 	char log_buf[log_buf_sz];
2391 	int fd = -1;
2392 
2393 	opts.log_buf = log_buf;
2394 	opts.log_size = log_buf_sz;
2395 
2396 	log_buf[0] = '\0';
2397 	opts.log_level = 0;
2398 	fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL",
2399 			   prog_insns, prog_insn_cnt, &opts);
2400 	ASSERT_STREQ(log_buf, "", "log_0");
2401 	ASSERT_GE(fd, 0, "prog_fd");
2402 	return fd;
2403 }
2404 
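/* Attach max_progs (63) dummy programs on a fresh veth device and verify
 * that one more attachment, optionally positioned via BPF_F_BEFORE or
 * BPF_F_AFTER relative to the last program, is rejected with -ERANGE.
 */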
2405 static void test_tc_opts_max_target(int target, int flags, bool relative)
2406 {
2407 	int err, ifindex, i, prog_fd, last_fd = -1;
2408 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2409 	const int max_progs = 63;
2410 
2411 	ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
2412 	ifindex = if_nametoindex("tcx_opts1");
2413 	ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
2414 
2415 	assert_mprog_count_ifindex(ifindex, target, 0);
2416 
2417 	for (i = 0; i < max_progs; i++) {
2418 		prog_fd = generate_dummy_prog();
2419 		if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
2420 			goto cleanup;
2421 		err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
2422 		if (!ASSERT_EQ(err, 0, "prog_attach"))
2423 			goto cleanup;
2424 		assert_mprog_count_ifindex(ifindex, target, i + 1);
2425 		if (i == max_progs - 1 && relative)
2426 			last_fd = prog_fd;
2427 		else
2428 			close(prog_fd);
2429 	}
2430 
2431 	prog_fd = generate_dummy_prog();
2432 	if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
2433 		goto cleanup;
2434 	opta.flags = flags;
2435 	if (last_fd > 0)
2436 		opta.relative_fd = last_fd;
2437 	err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
2438 	ASSERT_EQ(err, -ERANGE, "prog_64_attach");
2439 	assert_mprog_count_ifindex(ifindex, target, max_progs);
2440 	close(prog_fd);
2441 cleanup:
2442 	if (last_fd > 0)
2443 		close(last_fd);
2444 	ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
2445 	ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
2446 	ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
2447 }
2448 
2449 void serial_test_tc_opts_max(void)
2450 {
2451 	test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false);
2452 	test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false);
2453 
2454 	test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false);
2455 	test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true);
2456 
2457 	test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
2458 	test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
2459 }
2460 
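/* Query the attached programs twice each way: once via libbpf's
 * bpf_prog_query_opts() and once via the raw BPF_PROG_QUERY syscall,
 * first without a prog_ids buffer (to read count/revision only) and
 * then with one.
 */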
2461 static void test_tc_opts_query_target(int target)
2462 {
2463 	const size_t attr_size = offsetofend(union bpf_attr, query);
2464 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2465 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2466 	LIBBPF_OPTS(bpf_prog_query_opts, optq);
2467 	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
2468 	struct test_tc_link *skel;
2469 	union bpf_attr attr;
2470 	__u32 prog_ids[5];
2471 	int err;
2472 
2473 	skel = test_tc_link__open_and_load();
2474 	if (!ASSERT_OK_PTR(skel, "skel_load"))
2475 		goto cleanup;
2476 
2477 	fd1 = bpf_program__fd(skel->progs.tc1);
2478 	fd2 = bpf_program__fd(skel->progs.tc2);
2479 	fd3 = bpf_program__fd(skel->progs.tc3);
2480 	fd4 = bpf_program__fd(skel->progs.tc4);
2481 
2482 	id1 = id_from_prog_fd(fd1);
2483 	id2 = id_from_prog_fd(fd2);
2484 	id3 = id_from_prog_fd(fd3);
2485 	id4 = id_from_prog_fd(fd4);
2486 
2487 	assert_mprog_count(target, 0);
2488 
2489 	LIBBPF_OPTS_RESET(opta,
2490 		.expected_revision = 1,
2491 	);
2492 
2493 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
2494 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2495 		goto cleanup;
2496 
2497 	assert_mprog_count(target, 1);
2498 
2499 	LIBBPF_OPTS_RESET(opta,
2500 		.expected_revision = 2,
2501 	);
2502 
2503 	err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
2504 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2505 		goto cleanup1;
2506 
2507 	assert_mprog_count(target, 2);
2508 
2509 	LIBBPF_OPTS_RESET(opta,
2510 		.expected_revision = 3,
2511 	);
2512 
2513 	err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
2514 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2515 		goto cleanup2;
2516 
2517 	assert_mprog_count(target, 3);
2518 
2519 	LIBBPF_OPTS_RESET(opta,
2520 		.expected_revision = 4,
2521 	);
2522 
2523 	err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
2524 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2525 		goto cleanup3;
2526 
2527 	assert_mprog_count(target, 4);
2528 
2529 	/* Test 1: Double query via libbpf API */
2530 	err = bpf_prog_query_opts(loopback, target, &optq);
2531 	if (!ASSERT_OK(err, "prog_query"))
2532 		goto cleanup4;
2533 
2534 	ASSERT_EQ(optq.count, 4, "count");
2535 	ASSERT_EQ(optq.revision, 5, "revision");
2536 	ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
2537 	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
2538 
2539 	memset(prog_ids, 0, sizeof(prog_ids));
2540 	optq.prog_ids = prog_ids;
2541 
2542 	err = bpf_prog_query_opts(loopback, target, &optq);
2543 	if (!ASSERT_OK(err, "prog_query"))
2544 		goto cleanup4;
2545 
2546 	ASSERT_EQ(optq.count, 4, "count");
2547 	ASSERT_EQ(optq.revision, 5, "revision");
2548 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2549 	ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
2550 	ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
2551 	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
2552 	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
2553 	ASSERT_EQ(optq.link_ids, NULL, "link_ids");
2554 
2555 	/* Test 2: Double query via bpf_attr & bpf(2) directly */
2556 	memset(&attr, 0, attr_size);
2557 	attr.query.target_ifindex = loopback;
2558 	attr.query.attach_type = target;
2559 
2560 	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
2561 	if (!ASSERT_OK(err, "prog_query"))
2562 		goto cleanup4;
2563 
2564 	ASSERT_EQ(attr.query.count, 4, "count");
2565 	ASSERT_EQ(attr.query.revision, 5, "revision");
2566 	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
2567 	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
2568 	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
2569 	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
2570 	ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
2571 	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
2572 	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
2573 	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
2574 
2575 	memset(prog_ids, 0, sizeof(prog_ids));
2576 	attr.query.prog_ids = ptr_to_u64(prog_ids);
2577 
2578 	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
2579 	if (!ASSERT_OK(err, "prog_query"))
2580 		goto cleanup4;
2581 
2582 	ASSERT_EQ(attr.query.count, 4, "count");
2583 	ASSERT_EQ(attr.query.revision, 5, "revision");
2584 	ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
2585 	ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
2586 	ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
2587 	ASSERT_EQ(attr.query.attach_type, target, "attach_type");
2588 	ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
2589 	ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
2590 	ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
2591 	ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
2592 	ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
2593 	ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
2594 	ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
2595 	ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
2596 	ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
2597 
2598 cleanup4:
2599 	err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
2600 	ASSERT_OK(err, "prog_detach");
2601 	assert_mprog_count(target, 3);
2602 
2603 cleanup3:
2604 	err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
2605 	ASSERT_OK(err, "prog_detach");
2606 	assert_mprog_count(target, 2);
2607 
2608 cleanup2:
2609 	err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
2610 	ASSERT_OK(err, "prog_detach");
2611 	assert_mprog_count(target, 1);
2612 
2613 cleanup1:
2614 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2615 	ASSERT_OK(err, "prog_detach");
2616 	assert_mprog_count(target, 0);
2617 
2618 cleanup:
2619 	test_tc_link__destroy(skel);
2620 }
2621 
2622 void serial_test_tc_opts_query(void)
2623 {
2624 	test_tc_opts_query_target(BPF_TCX_INGRESS);
2625 	test_tc_opts_query_target(BPF_TCX_EGRESS);
2626 }
2627 
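/* Query an empty chain for its revision and feed it back as
 * expected_revision on attach, then confirm the result with a second
 * query.
 */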
2628 static void test_tc_opts_query_attach_target(int target)
2629 {
2630 	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
2631 	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
2632 	LIBBPF_OPTS(bpf_prog_query_opts, optq);
2633 	struct test_tc_link *skel;
2634 	__u32 prog_ids[2];
2635 	__u32 fd1, id1;
2636 	int err;
2637 
2638 	skel = test_tc_link__open_and_load();
2639 	if (!ASSERT_OK_PTR(skel, "skel_load"))
2640 		goto cleanup;
2641 
2642 	fd1 = bpf_program__fd(skel->progs.tc1);
2643 	id1 = id_from_prog_fd(fd1);
2644 
2645 	err = bpf_prog_query_opts(loopback, target, &optq);
2646 	if (!ASSERT_OK(err, "prog_query"))
2647 		goto cleanup;
2648 
2649 	ASSERT_EQ(optq.count, 0, "count");
2650 	ASSERT_EQ(optq.revision, 1, "revision");
2651 
2652 	LIBBPF_OPTS_RESET(opta,
2653 		.expected_revision = optq.revision,
2654 	);
2655 
2656 	err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
2657 	if (!ASSERT_EQ(err, 0, "prog_attach"))
2658 		goto cleanup;
2659 
2660 	memset(prog_ids, 0, sizeof(prog_ids));
2661 	optq.prog_ids = prog_ids;
2662 	optq.count = ARRAY_SIZE(prog_ids);
2663 
2664 	err = bpf_prog_query_opts(loopback, target, &optq);
2665 	if (!ASSERT_OK(err, "prog_query"))
2666 		goto cleanup1;
2667 
2668 	ASSERT_EQ(optq.count, 1, "count");
2669 	ASSERT_EQ(optq.revision, 2, "revision");
2670 	ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
2671 	ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
2672 
2673 cleanup1:
2674 	err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
2675 	ASSERT_OK(err, "prog_detach");
2676 	assert_mprog_count(target, 0);
2677 cleanup:
2678 	test_tc_link__destroy(skel);
2679 }
2680 
2681 void serial_test_tc_opts_query_attach(void)
2682 {
2683 	test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
2684 	test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
2685 }
2686