// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 1048576

#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

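/* Expected outcome of a test program for a single privilege mode */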
struct test_subspec {
	char *name;
	bool expect_failure;
	const char **expect_msgs;
	size_t expect_msg_cnt;
	int retval;
	bool execute;
};

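/* Per-program test specification parsed from its BTF decl tags */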
struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	int log_level;
	int prog_flags;
	int mode_mask;
	bool auxiliary;
	bool valid;
};

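/* Lazily allocate the verifier log buffer shared by all subtests */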
static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = malloc(tester->log_buf_sz);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_test_spec(struct test_spec *spec)
{
	free(spec->priv.name);
	free(spec->unpriv.name);
	free(spec->priv.expect_msgs);
	free(spec->unpriv.expect_msgs);

	spec->priv.name = NULL;
	spec->unpriv.name = NULL;
	spec->priv.expect_msgs = NULL;
	spec->unpriv.expect_msgs = NULL;
}

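/* Append an expected verifier log message to the subspec's array */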
static int push_msg(const char *msg, struct test_subspec *subspec)
{
	void *tmp;

	tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	subspec->expect_msgs = tmp;
	subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;

	return 0;
}

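/* Parse a decimal or 0x-prefixed hexadecimal integer */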
static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

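/* Accept a few symbolic constants (INT_MIN, POINTER_VALUE, TEST_DATA_LEN)
 * in addition to plain integer values.
 */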
static int parse_retval(const char *str, int *val, const char *name)
{
	struct {
		char *name;
		int val;
	} named_values[] = {
		{ "INT_MIN"      , INT_MIN },
		{ "POINTER_VALUE", POINTER_VALUE },
		{ "TEST_DATA_LEN", TEST_DATA_LEN },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
		if (strcmp(str, named_values[i].name) != 0)
			continue;
		*val = named_values[i].val;
		return 0;
	}

	return parse_int(str, val, name);
}

/* Uses btf_decl_tag attributes to describe the expected test
 * behavior, see bpf_misc.h for detailed description of each attribute
 * and attribute combinations.
 */
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	int func_id, i, err = 0;
	struct btf *btf;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		int tmp;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
			err = push_msg(msg, &spec->priv);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
			err = push_msg(msg, &spec->unpriv);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_STRICT_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_ANY_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				spec->prog_flags |= BPF_F_TEST_RND_HI32;
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				spec->prog_flags |= BPF_F_TEST_STATE_FREQ;
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				spec->prog_flags |= BPF_F_SLEEPABLE;
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
			} else /* assume numeric value */ {
				err = parse_int(val, &tmp, "test prog flags");
				if (err)
					goto cleanup;
				spec->prog_flags |= tmp;
			}
		}
	}

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

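	/* Unless overridden by an *_unpriv tag, unpriv expectations default
	 * to the corresponding priv ones.
	 */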
	if (spec->mode_mask & UNPRIV) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (!spec->unpriv.expect_msgs) {
			size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);

			spec->unpriv.expect_msgs = malloc(sz);
			if (!spec->unpriv.expect_msgs) {
				PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
				err = -ENOMEM;
				goto cleanup;
			}
			memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
			spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
		}
	}

	spec->valid = true;

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}

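/* Point the program at the shared log buffer and apply the log level and
 * prog flags requested by the test spec before the program is loaded.
 */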
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
	tester->next_match_pos = 0;
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

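/* Verify that all expected messages appear in the verifier log, in order */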
static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;

	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;

		expect_msg = subspec->expect_msgs[i];

		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}

		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}

struct cap_state {
	__u64 old_caps;
	bool initialized;
};

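/* Drop CAP_SYS_ADMIN, CAP_NET_ADMIN, CAP_PERFMON and CAP_BPF to emulate an
 * unprivileged caller; the previous capability set is saved for restoration.
 */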
static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON   | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
	caps->initialized = false;
	return err;
}

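/* Unpriv tests are skipped when the kernel.unprivileged_bpf_disabled sysctl
 * is set, or when the test requires BPF_F_ANY_ALIGNMENT on a platform
 * without efficient unaligned access.
 */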
static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

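/* Map types an unprivileged user is allowed to create */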
static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

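/* Execute the loaded program once via BPF_PROG_TEST_RUN and report its
 * return value.
 */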
static int do_prog_test_run(int fd_prog, int *retval)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = tmp_in,
		    .data_size_in = sizeof(tmp_in),
		    .data_out = tmp_out,
		    .data_size_out = sizeof(tmp_out),
		    .repeat = 1,
	);

	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

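/* Programs are executed only for expected-success cases that specify a
 * __retval value and that this platform can actually run.
 */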
static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

/* this function is forced noinline and has short generic name to look better
 * in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	struct bpf_program *tprog, *tprog_iter;
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autoload for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}

	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);

	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		do_prog_test_run(bpf_program__fd(tprog), &retval);
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}

tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

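/* Open the skeleton ELF once, parse a test spec for every program in it, and
 * run each valid, non-auxiliary spec as a subtest in priv and/or unpriv mode.
 */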
static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "Can't alloc specs array"))
		return;

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);
	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}