// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in bpf_testmod.ko BTF");

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

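/* Open-coded iterator kfuncs: bpf_iter_testmod_seq_new() arms the iterator
 * to yield 'value' cnt times, _next() returns a pointer to the value while
 * counting down, and _destroy() resets the count.
 */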
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
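
/* A minimal sketch (not part of this module) of how a BPF program might
 * drive the iterator kfuncs above, assuming the usual open-coded iterator
 * pattern of new/next/destroy on a stack-allocated iterator:
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	bpf_iter_testmod_seq_new(&it, 100, 3);
 *	while ((v = bpf_iter_testmod_seq_next(&it)))
 *		sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 */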

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to test LBR. Create a lot of
	 * branches in the function, so we can catch them easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

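/* Hand back a spread of valid, invalid and non-canonical pointers so that
 * programs tracing this function can exercise reading them, including the
 * exception-table path (the "trigger extable" case below).
 */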
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}

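/* Plain functions used as fentry/fexit attach targets by the selftests;
 * the numeric suffix matches the number of arguments.
 */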
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
			21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__diag_pop();

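/* Reading or writing /sys/kernel/bpf_testmod from user space (e.g. with
 * "cat /sys/kernel/bpf_testmod") invokes bpf_testmod_test_read() or
 * bpf_testmod_test_write() above, which is how the selftests trigger the
 * tracepoints, struct-argument helpers and fentry targets in this module.
 */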
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

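/* kfuncs available to all program types; registered with
 * BPF_PROG_TYPE_UNSPEC in bpf_testmod_init() below.
 */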
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};

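/* The kfuncs below are collected in bpf_testmod_check_kfunc_ids and
 * registered in bpf_testmod_init() for SCHED_CLS, TRACING and SYSCALL
 * programs.
 */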
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next 2 kfuncs can't really be used for testing, except to ensure
 * that the verifier rejects the call. Acquire functions must return
 * struct pointers, so these fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};

extern int bpf_fentry_test1(int a);

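/* Register the kfunc ID sets above and expose /sys/kernel/bpf_testmod so
 * user space can trigger the test handlers in this module.
 */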
static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");