1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2020 Facebook */
3 #include <linux/btf.h>
4 #include <linux/btf_ids.h>
5 #include <linux/error-injection.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/percpu-defs.h>
9 #include <linux/sysfs.h>
10 #include <linux/tracepoint.h>
11 #include "bpf_testmod.h"
12 #include "bpf_testmod_kfunc.h"
13 
14 #define CREATE_TRACE_POINTS
15 #include "bpf_testmod-events.h"
16 
/* Chain of function-pointer typedefs; forced into the module's BTF via
 * BTF_TYPE_EMIT() in bpf_testmod_test_btf_type_tag_user_1() below.
 */
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

/* Per-CPU ksym; written by bpf_testmod_test_mod_kfunc() below. */
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
/* Accumulator written by the bpf_testmod_test_struct_arg_*() helpers. */
long bpf_testmod_test_struct_arg_result;
23 
/* Small by-value argument (fits in one register on most ABIs). */
struct bpf_testmod_struct_arg_1 {
	int a;
};
/* Larger by-value argument (two longs). */
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

/* Passed by pointer; ends in a flexible array member. */
struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};
36 
37 __diag_push();
38 __diag_ignore_all("-Wmissing-prototypes",
39 		  "Global functions as their definitions will be in bpf_testmod.ko BTF");
40 
41 noinline int
42 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
43 	bpf_testmod_test_struct_arg_result = a.a + a.b  + b + c;
44 	return bpf_testmod_test_struct_arg_result;
45 }
46 
47 noinline int
48 bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
49 	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
50 	return bpf_testmod_test_struct_arg_result;
51 }
52 
53 noinline int
54 bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
55 	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
56 	return bpf_testmod_test_struct_arg_result;
57 }
58 
59 noinline int
60 bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
61 			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
62 	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
63 	return bpf_testmod_test_struct_arg_result;
64 }
65 
66 noinline int
67 bpf_testmod_test_struct_arg_5(void) {
68 	bpf_testmod_test_struct_arg_result = 1;
69 	return bpf_testmod_test_struct_arg_result;
70 }
71 
72 noinline int
73 bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
74 	bpf_testmod_test_struct_arg_result = a->b[0];
75 	return bpf_testmod_test_struct_arg_result;
76 }
77 
78 __bpf_kfunc void
79 bpf_testmod_test_mod_kfunc(int i)
80 {
81 	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
82 }
83 
84 __bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
85 {
86 	if (cnt < 0) {
87 		it->cnt = 0;
88 		return -EINVAL;
89 	}
90 
91 	it->value = value;
92 	it->cnt = cnt;
93 
94 	return 0;
95 }
96 
97 __bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it)
98 {
99 	if (it->cnt <= 0)
100 		return NULL;
101 
102 	it->cnt--;
103 
104 	return &it->value;
105 }
106 
/* Destroy the iterator: mark it exhausted so further _next() calls
 * return NULL. No resources to release.
 */
__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
111 
/* Plain struct used as the pointee for the btf_type_tag tests below. */
struct bpf_testmod_btf_type_tag_1 {
	int a;
};

/* Holds a __user-tagged pointer; the tag is recorded in BTF. */
struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

/* Holds a __percpu-tagged pointer; the tag is recorded in BTF. */
struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};
123 
noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	/* Force the nested func-proto typedefs into the module's BTF. */
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	/* NOTE(review): direct dereference of a __user pointer - presumably
	 * only meaningful as a BPF attach target exercising the __user
	 * btf_type_tag, never as a normal kernel call; confirm against the
	 * corresponding selftest.
	 */
	return arg->a;
}
131 
noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	/* Same __user-tag exercise as above, but with the tagged pointer
	 * one level down inside a plain struct.
	 */
	return arg->p->a;
}
136 
noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	/* __percpu variant of the btf_type_tag attach target. */
	return arg->a;
}
141 
noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	/* __percpu tag nested one level down inside a plain struct. */
	return arg->p->a;
}
146 
147 noinline int bpf_testmod_loop_test(int n)
148 {
149 	/* Make sum volatile, so smart compilers, such as clang, will not
150 	 * optimize the code by removing the loop.
151 	 */
152 	volatile int sum = 0;
153 	int i;
154 
155 	/* the primary goal of this test is to test LBR. Create a lot of
156 	 * branches in the function, so we can catch it easily.
157 	 */
158 	for (i = 0; i < n; i++)
159 		sum += i;
160 	return sum;
161 }
162 
/* Return one pointer from each interesting address class, selected by
 * @arg, so callers (see bpf_testmod_test_read()) can exercise
 * probe-reads of user, invalid, non-canonical, faulting and valid
 * kernel addresses. __weak so it can be overridden.
 */
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
	default: return NULL;
	}
}
178 
/* fentry/fexit attach target; bpf_testmod_test_read() checks the result. */
noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}
183 
/* Two-argument fentry target with a mixed int/u64 signature. */
noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}
188 
/* Three-argument fentry target with char/int/u64 argument widths. */
noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}
193 
/* Set to 1 by bpf_testmod_test_read() once all fentry targets returned
 * the expected values; read back by selftests.
 */
int bpf_testmod_fentry_ok;
195 
/* Read handler for the sysfs "bpf_testmod" file. Drives every test
 * helper above so BPF programs attached to them fire, emits the
 * bpf_testmod_test_read tracepoint, and always returns -EIO (except for
 * the len == 64 writable-tracepoint early-return path).
 */
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	int i = 1;

	/* Walk bpf_testmod_return_ptr() through all its pointer classes
	 * until it returns NULL (args 1..7 are non-NULL).
	 */
	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();

	/* One extra int to give the flexible array member b[] one slot. */
	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
				sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	/* Run the fentry targets with known inputs; record success only
	 * if every result matches.
	 */
	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);
255 
/* Write handler for the sysfs "bpf_testmod" file: fires the bare
 * bpf_testmod_test_write tracepoint with the write parameters, then
 * always fails with -EIO.
 */
noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);
273 
/* Module-side function sharing its name with an in-kernel symbol;
 * NOTE(review): presumably used to test fentry attachment to shadowed
 * symbols - confirm against the selftest that attaches to it.
 */
noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
279 
280 __diag_pop();
281 
/* /sys/kernel/bpf_testmod: reading/writing it invokes the test
 * handlers above. Mode 0666 so unprivileged test processes can
 * trigger them.
 */
static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
287 
/* Open-coded iterator kfuncs; registered for all program types
 * (BPF_PROG_TYPE_UNSPEC) in bpf_testmod_init().
 */
BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_common_kfunc_ids,
};
298 
/* kfunc with mixed u32/u64 scalars; sk is unused, only its type matters. */
__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}
303 
/* Smaller kfunc variant: two u32 scalars, int return. */
__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}
308 
/* Identity kfunc: returns its pointer argument unchanged. */
__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}
313 
__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
321 
/* Singleton object handed out by bpf_kfunc_call_test_acquire(). Points
 * to itself via .next and starts with refcount 1 so the object is never
 * actually freed.
 */
static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};
328 
/* KF_ACQUIRE kfunc: bump the singleton's refcount and hand it out.
 * The scalar_ptr argument is ignored; only its type shape matters to
 * the verifier tests.
 */
__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}
335 
/* Must never actually run: the selftests only attempt (and expect the
 * verifier to reject) calls to it, hence the WARN if it executes.
 */
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}
340 
/* Acquire kfunc that should never execute (verifier-rejection test);
 * WARNs and returns NULL if it somehow does.
 */
__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}
347 
/* KF_RELEASE kfunc that should never execute; WARNs if it does. */
__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}
352 
/* Reinterpret @p as a small int buffer. Requests larger than two ints
 * are out of range and yield NULL. Shared backend for the get_rdwr /
 * get_rdonly / acq_rdonly kfuncs below.
 */
static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	return size <= 2 * sizeof(int) ? (int *)p : NULL;
}
360 
/* Hand out @p's memory as a read-write buffer of rdwr_buf_size bytes
 * (NULL if too large).
 */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

/* Same as above but registered without write access (see the BTF set
 * flags below); the buffer contents are identical.
 */
__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}
372 
/* The next 2 functions cannot really be used for testing except to
 * ensure that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
/* Registered KF_ACQUIRE but returns int*, not a struct pointer - the
 * verifier must reject calls to it (see the comment above).
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* Registered KF_RELEASE with a scalar-pointer argument - likewise only
 * exists to be rejected by the verifier; intentionally a no-op.
 */
__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}
387 
/* The following kfuncs are intentionally empty: only their BTF
 * signatures matter. NOTE(review): the pass*/fail* naming mirrors the
 * prog_test_pass*/prog_test_fail* argument types, presumably marking
 * which argument shapes the verifier should accept vs reject - confirm
 * against the kfunc_call selftests.
 */
__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}
411 
/* mem + length pairs: only the first uses the "__sz" suffix that marks
 * the second argument as the size of the first; the "fail" variants
 * omit it and so should not typecheck as mem/size pairs.
 */
__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}
423 
/* Takes a trusted/RCU pointer (see KF_TRUSTED_ARGS | KF_RCU below). */
__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

/* Registered KF_DESTRUCTIVE; body intentionally empty. */
__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

/* static yet listed in the BTF id set; second argument deliberately
 * unused to test unused-arg handling.
 */
__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}
437 
/* kfuncs exposed to SCHED_CLS, TRACING and SYSCALL programs (see
 * bpf_testmod_init()); flags encode each function's verifier contract.
 */
BTF_SET8_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_SET8_END(bpf_testmod_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_testmod_check_kfunc_ids,
};
470 
471 extern int bpf_fentry_test1(int a);
472 
473 static int bpf_testmod_init(void)
474 {
475 	int ret;
476 
477 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
478 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
479 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
480 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
481 	if (ret < 0)
482 		return ret;
483 	if (bpf_fentry_test1(0) < 0)
484 		return -EINVAL;
485 	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
486 }
487 
488 static void bpf_testmod_exit(void)
489 {
490 	return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
491 }
492 
module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

/* Module metadata; dual-licensed so BSD userspace tooling can link BTF. */
MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");
499