// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

#include "map_kptr.skel.h"
#include "map_kptr_fail.skel.h"

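/* Buffer that receives the kernel verifier log so the failure tests can
 * check it for the expected error message.
 */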
static char log_buf[1024 * 1024];

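/* Each entry names a program in the map_kptr_fail skeleton that the verifier
 * must reject, paired with the substring expected in the verifier log.
 */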
struct {
	const char *prog_name;
	const char *err_msg;
} map_kptr_fail_tests[] = {
	{ "size_not_bpf_dw", "kptr access size must be BPF_DW" },
	{ "non_const_var_off", "kptr access cannot have variable offset" },
	{ "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" },
	{ "misaligned_access_write", "kptr access misaligned expected=8 off=7" },
	{ "misaligned_access_read", "kptr access misaligned expected=8 off=1" },
	{ "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" },
	{ "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" },
	{ "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
	{ "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" },
	{ "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" },
	{ "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" },
	{ "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" },
	{ "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" },
	{ "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" },
	{ "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" },
	{ "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" },
	{ "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
	{ "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" },
	{ "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" },
	{ "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" },
	{ "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" },
	{ "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" },
	{ "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
	{ "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
	{ "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
};

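/* Open the failure skeleton, enable only @prog_name, and verify that loading
 * fails with a verifier log containing @err_msg.
 */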
static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct map_kptr_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = map_kptr_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

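	/* Enable loading of only the program under test; the other failure
	 * programs stay non-autoload so each verifier error can be checked
	 * in isolation.
	 */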
	bpf_program__set_autoload(prog, true);

	ret = map_kptr_fail__load(skel);
	if (!ASSERT_ERR(ret, "map_kptr_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	map_kptr_fail__destroy(skel);
}

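/* Run every expected-failure case from map_kptr_fail_tests as its own subtest. */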
static void test_map_kptr_fail(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) {
		if (!test__start_subtest(map_kptr_fail_tests[i].prog_name))
			continue;
		test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name,
					map_kptr_fail_tests[i].err_msg);
	}
}

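/* Exercise the programs and maps that must load and run successfully. With
 * test_run set, only the prog runs are repeated and the user-space map
 * operations are skipped.
 */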
static void test_map_kptr_success(bool test_run)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct map_kptr *skel;
	int key = 0, ret;
	char buf[16];

	skel = map_kptr__open_and_load();
	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
		return;

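	/* Run the reference kptr test programs via bpf_prog_test_run_opts(). */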
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
	ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
	ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");

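	/* On the repeat invocation, skip the user-space map operations below. */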
	if (test_run)
		goto exit;

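	/* Update and delete elements from user space; these map operations
	 * also release any referenced kptrs that the test programs left
	 * stored in the map values.
	 */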
	ret = bpf_map__update_elem(skel->maps.array_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "array_map update");
	ret = bpf_map__update_elem(skel->maps.array_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "array_map update2");

	ret = bpf_map__update_elem(skel->maps.hash_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "hash_map update");
	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_map delete");

	ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "hash_malloc_map update");
	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_malloc_map delete");

	ret = bpf_map__update_elem(skel->maps.lru_hash_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "lru_hash_map update");
	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "lru_hash_map delete");

exit:
	map_kptr__destroy(skel);
}

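/* Entry point: run the success path twice, then the expected-failure cases. */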
void test_map_kptr(void)
{
	if (test__start_subtest("success")) {
		test_map_kptr_success(false);
		/* Run test_run twice so that we see the refcount going back to 1
		 * after leaving the object in the map during the first iteration.
		 */
		test_map_kptr_success(true);
	}
	test_map_kptr_fail();
}