// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>		/* kmalloc()/kfree() in bpf_testmod_test_read() */
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in bpf_testmod.ko BTF");

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

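/* Simple module kfunc: stores its argument in the per-CPU variable
 * bpf_testmod_ksym_percpu. It is made callable from BPF_PROG_TYPE_SCHED_CLS
 * programs via the kfunc id set registered in bpf_testmod_init() below.
 */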
__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

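/* Open-coded iterator kfuncs: new/next/destroy yield "value" exactly "cnt"
 * times. They are flagged KF_ITER_NEW, KF_ITER_NEXT | KF_RET_NULL and
 * KF_ITER_DESTROY in the common kfunc id set below, so the verifier treats
 * them as a proper iterator triple.
 */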
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

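/* Returns a differently broken pointer for each arg value, so tracing
 * programs that dereference the return value exercise user-address
 * rejection, non-canonical addresses and the exception-table path, as the
 * per-case comments note.
 */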
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

__diag_pop();

int bpf_testmod_fentry_ok;

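/* .read handler of the "bpf_testmod" sysfs bin file created in
 * bpf_testmod_init(). Reading the file runs the struct-argument, loop and
 * fentry helpers above and fires the test tracepoints. It returns -EIO
 * unless a program attached to the writable tracepoint sets early_ret or
 * error injection overrides the result.
 *
 * Illustrative userspace trigger (a sketch, not part of this module):
 *
 *	int fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
 *	char buf[64];
 *
 *	read(fd, buf, sizeof(buf));	// len == 64 also exercises the
 *					// writable tracepoint path below
 */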
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();

	/* Allocate room for one element of the flexible array b[]. */
	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

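/* The iterator kfuncs are registered for BPF_PROG_TYPE_UNSPEC, i.e. not tied
 * to a particular program type, while bpf_testmod_test_mod_kfunc is only made
 * available to BPF_PROG_TYPE_SCHED_CLS programs; see bpf_testmod_init().
 */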
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

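/* A function with the same name exists in the core kernel; presumably the
 * duplicate lets selftests verify that fentry attachment resolves to the
 * intended copy (module vs. vmlinux BTF).
 */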
noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

extern int bpf_fentry_test1(int a);

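/* Registers both kfunc id sets, calls bpf_fentry_test1() once (failing init
 * if it returns an error) and exposes the test file at
 * /sys/kernel/bpf_testmod.
 */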
static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");