xref: /openbmc/linux/tools/testing/selftests/bpf/test_loader.c (revision b0bc615df488abd0e95107e4a9ecefb9bf8c250a)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 1048576

#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
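
/*
 * Test programs do not spell these tag strings out by hand; they come from
 * the wrapper macros in progs/bpf_misc.h (__description, __failure,
 * __success, __msg, __msg_unpriv, __retval, __log_level, __flag, ...),
 * which expand to btf_decl_tag attributes along the lines of (a sketch,
 * not the authoritative definition):
 *
 *   #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
 *
 * See progs/bpf_misc.h for the actual macro definitions.
 */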

/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

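/* Cached result of get_unpriv_disabled() (the kernel.unprivileged_bpf_disabled
 * sysctl); -1 means it has not been queried yet, see can_execute_unpriv().
 */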
static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

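/* Expected outcome and messages for one privilege mode. A test_spec carries
 * one test_subspec for privileged runs and one for unprivileged runs.
 */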
struct test_subspec {
	char *name;
	bool expect_failure;
	const char **expect_msgs;
	size_t expect_msg_cnt;
	int retval;
	bool execute;
};

struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	int log_level;
	int prog_flags;
	int mode_mask;
};

static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = malloc(tester->log_buf_sz);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_test_spec(struct test_spec *spec)
{
	free(spec->priv.name);
	free(spec->unpriv.name);
	free(spec->priv.expect_msgs);
	free(spec->unpriv.expect_msgs);
}

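/* Append one expected verifier-log message to the subspec, growing the
 * expect_msgs array by a single element.
 */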
static int push_msg(const char *msg, struct test_subspec *subspec)
{
	void *tmp;

	tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	subspec->expect_msgs = tmp;
	subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;

	return 0;
}

static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

static int parse_retval(const char *str, int *val, const char *name)
{
	struct {
		char *name;
		int val;
	} named_values[] = {
		{ "INT_MIN"      , INT_MIN },
		{ "POINTER_VALUE", POINTER_VALUE },
		{ "TEST_DATA_LEN", TEST_DATA_LEN },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
		if (strcmp(str, named_values[i].name) != 0)
			continue;
		*val = named_values[i].val;
		return 0;
	}

	return parse_int(str, val, name);
}

/* Uses btf_decl_tag attributes to describe the expected test behavior;
 * see bpf_misc.h for a detailed description of each attribute and of
 * attribute combinations.
 */
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	int func_id, i, err = 0;
	struct btf *btf;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		int tmp;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
			err = push_msg(msg, &spec->priv);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
			err = push_msg(msg, &spec->unpriv);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;
			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_STRICT_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				spec->prog_flags |= BPF_F_ANY_ALIGNMENT;
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				spec->prog_flags |= BPF_F_TEST_RND_HI32;
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				spec->prog_flags |= BPF_F_TEST_STATE_FREQ;
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				spec->prog_flags |= BPF_F_SLEEPABLE;
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				spec->prog_flags |= BPF_F_XDP_HAS_FRAGS;
			} else /* assume numeric value */ {
				err = parse_int(val, &tmp, "test prog flags");
				if (err)
					goto cleanup;
				spec->prog_flags |= tmp;
			}
		}
	}

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

	if (spec->mode_mask & UNPRIV) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (!spec->unpriv.expect_msgs) {
			size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);

			spec->unpriv.expect_msgs = malloc(sz);
			if (!spec->unpriv.expect_msgs) {
				PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
				err = -ENOMEM;
				goto cleanup;
			}
			memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
			spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
		}
	}

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}

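/*
 * For illustration, a test program annotated with the tags parsed above
 * might look roughly like this (a sketch using the bpf_misc.h wrappers;
 * names and messages are made up):
 *
 *   SEC("socket")
 *   __description("some descriptive test name")
 *   __failure __msg("R0 invalid mem access")
 *   __failure_unpriv __msg_unpriv("R0 leaks addr")
 *   __naked void example_test(void)
 *   {
 *           ...
 *   }
 */
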
static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least minimal log level, unless test requires
	 * even higher level already. Make sure to preserve independent log
	 * level 4 (verifier stats), though.
	 */
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
	tester->next_match_pos = 0;
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

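/* Check that every expected message appears in the verifier log, in the
 * order the tags were declared. Matching resumes from next_match_pos, so
 * each message must occur after the previous match.
 */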
static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;

	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;

		expect_msg = subspec->expect_msgs[i];

		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED  MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}

		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}

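/* Unprivileged mode is emulated by dropping the BPF-related capabilities
 * (CAP_SYS_ADMIN, CAP_NET_ADMIN, CAP_PERFMON, CAP_BPF) from the effective
 * set for the duration of the subtest and restoring them afterwards.
 */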
struct cap_state {
	__u64 old_caps;
	bool initialized;
};

static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON   | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
	caps->initialized = false;
	return err;
}

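/* An unprivileged subtest can only run if the kernel permits unprivileged
 * BPF at all and the test does not require BPF_F_ANY_ALIGNMENT on a
 * platform without efficient unaligned access.
 */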
static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

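/* Map types that an unprivileged process is allowed to create. Hash-based
 * maps only qualify without BPF_F_ZERO_SEED, since choosing the hash seed
 * is restricted to privileged users.
 */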
static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

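/* Execute the loaded program once via BPF_PROG_TEST_RUN with a zeroed
 * TEST_DATA_LEN-byte input buffer and report the program's return value
 * through *retval.
 */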
static int do_prog_test_run(int fd_prog, int *retval)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = tmp_in,
		.data_size_in = sizeof(tmp_in),
		.data_out = tmp_out,
		.data_size_out = sizeof(tmp_out),
		.repeat = 1,
	);

	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

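/* A program is executed only when a __retval tag asked for it, the load is
 * expected to succeed, and alignment constraints do not prevent execution.
 */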
static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

/* This function is forced noinline and has a short, generic name to look
 * better in test_progs output (in case of a failure).
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	struct cap_state caps = {};
	struct bpf_program *tprog;
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval;
	int err;

	if (!test__start_subtest(subspec->name))
		return;

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	bpf_object__for_each_program(tprog, tobj)
		bpf_program__set_autoload(tprog, false);

	bpf_object__for_each_program(tprog, tobj) {
		/* only load specified program */
		if (strcmp(bpf_program__name(tprog), spec->prog_name) == 0) {
			bpf_program__set_autoload(tprog, true);
			break;
		}
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autocreate for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}

	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);

	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;

		do_prog_test_run(bpf_program__fd(tprog), &retval);
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}

tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

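/* Parse the test spec of every program in the ELF object once, then run
 * the privileged and/or unprivileged variant of each test as separate
 * subtests. Each run_subtest() call re-opens the object from the raw ELF
 * bytes so subtests stay independent.
 */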
static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	size_t obj_byte_cnt;
	int err;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	bpf_object__for_each_program(prog, obj) {
		struct test_spec spec;

		/* if we can't derive test specification, go to the next test */
		err = parse_test_spec(tester, obj, prog, &spec);
		if (err) {
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
			continue;
		}

		if (spec.mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, false);
		if (spec.mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, true);

		free_test_spec(&spec);
	}

	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}
655