// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"

static char log_buf[1024 * 1024];

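/*
 * Negative tests: each entry names a program in linked_list_fail.c and the
 * verifier log substring its load attempt must produce.
 */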
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 32)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	{ "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "write_after_push_front", "only read is supported" },
	{ "write_after_push_back", "only read is supported" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=1" },
	{ "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=17" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	{ "pop_front_off",
	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
	{ "pop_back_off",
	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
};

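/*
 * Open the fail skeleton with a verifier log buffer, enable autoload only for
 * the program under test, check that loading fails, and grep the log for the
 * expected error message.
 */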
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}

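/*
 * Overwriting the element makes the kernel release any bpf_list_head,
 * bpf_list_node or bpf_spin_lock fields still live in the old value; the
 * assertion tag refers to that kernel-side field-freeing path.
 */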
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}

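/* Which subset of the success programs to run; TEST_ALL runs every group. */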
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};

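/*
 * Run the success programs for the requested mode via BPF_PROG_TEST_RUN.
 * Each program pushes and pops list elements under the spin lock protecting
 * the list head; as a rough sketch of the shape of linked_list.c (not a
 * verbatim excerpt, names assumed):
 *
 *	bpf_spin_lock(&glock);
 *	bpf_list_push_front(&ghead, &f->node);
 *	n = bpf_list_pop_front(&ghead);
 *	bpf_spin_unlock(&glock);
 *
 * With leave_in_map set, clear_fields() is skipped so elements stay in the
 * maps and skeleton teardown exercises the kernel's cleanup of them.
 */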
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

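/* BTF type IDs that init_btf() assigns to bpf_spin_lock, bpf_list_head and bpf_list_node. */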
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4

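/*
 * Build a minimal BTF: "int" at ID 1, then bpf_spin_lock, bpf_list_head and
 * bpf_list_node at the IDs expected by the macros above.
 */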
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 16);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}

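/*
 * Each subtest below hand-builds BTF for list-carrying structs and checks the
 * kernel's verdict on loading it.  Two conventions to keep in mind: the
 * while (test__start_subtest(...)) { ... break; } loops run at most once and
 * only exist so error paths can bail out with break, and the offsets passed
 * to btf__add_field() are in bits, not bytes.
 *
 * The "contains:<struct>:<field>" decl tags constructed here are what the
 * __contains() annotation expands to in BPF C source.  As a sketch (assuming
 * the bpf_experimental.h helpers), the "owning -> owned" subtest models:
 *
 *	struct bar {
 *		struct bpf_list_node a;
 *	};
 *
 *	struct foo {
 *		struct bpf_list_head a __contains(bar, a);
 *		struct bpf_spin_lock b;
 *	};
 */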
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;

	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		btf__free(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		break;
	}

	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		/* The second node field deliberately reuses the name "b". */
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b (dup)"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 36);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 36);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 36);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 36);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 36);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 16);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}
}

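/*
 * Entry point: run each negative program as its own subtest, validate the
 * BTF shapes, then run the success programs in every mode, both clearing the
 * maps afterwards and leaving elements in place for teardown to free.
 */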
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}