xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 842ed298)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/btf.h>
11 #include <linux/filter.h>
12 #include <linux/uaccess.h>
13 #include <linux/ctype.h>
14 #include <linux/kprobes.h>
15 #include <linux/spinlock.h>
16 #include <linux/syscalls.h>
17 #include <linux/error-injection.h>
18 #include <linux/btf_ids.h>
19 #include <linux/bpf_lsm.h>
20 
21 #include <net/bpf_sk_storage.h>
22 
23 #include <uapi/linux/bpf.h>
24 #include <uapi/linux/btf.h>
25 
26 #include <asm/tlb.h>
27 
28 #include "trace_probe.h"
29 #include "trace.h"
30 
31 #define CREATE_TRACE_POINTS
32 #include "bpf_trace.h"
33 
34 #define bpf_event_rcu_dereference(p)					\
35 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
36 
37 #ifdef CONFIG_MODULES
38 struct bpf_trace_module {
39 	struct module *module;
40 	struct list_head list;
41 };
42 
43 static LIST_HEAD(bpf_trace_modules);
44 static DEFINE_MUTEX(bpf_module_mutex);
45 
46 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
47 {
48 	struct bpf_raw_event_map *btp, *ret = NULL;
49 	struct bpf_trace_module *btm;
50 	unsigned int i;
51 
52 	mutex_lock(&bpf_module_mutex);
53 	list_for_each_entry(btm, &bpf_trace_modules, list) {
54 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
55 			btp = &btm->module->bpf_raw_events[i];
56 			if (!strcmp(btp->tp->name, name)) {
57 				if (try_module_get(btm->module))
58 					ret = btp;
59 				goto out;
60 			}
61 		}
62 	}
63 out:
64 	mutex_unlock(&bpf_module_mutex);
65 	return ret;
66 }
67 #else
68 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
69 {
70 	return NULL;
71 }
72 #endif /* CONFIG_MODULES */
73 
74 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
75 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
76 
77 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
78 				  u64 flags, const struct btf **btf,
79 				  s32 *btf_id);
80 
81 /**
82  * trace_call_bpf - invoke BPF program
83  * @call: tracepoint event
84  * @ctx: opaque context pointer
85  *
86  * kprobe handlers execute BPF programs via this helper.
87  * Can be used from static tracepoints in the future.
88  *
89  * Return: BPF programs always return an integer which is interpreted by
90  * kprobe handler as:
91  * 0 - return from kprobe (event is filtered out)
92  * 1 - store kprobe event into ring buffer
93  * Other values are reserved and currently alias to 1
94  */
95 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
96 {
97 	unsigned int ret;
98 
99 	cant_sleep();
100 
101 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
102 		/*
103 		 * since some bpf program is already running on this cpu,
104 		 * don't call into another bpf program (same or different)
105 		 * and don't send kprobe event into ring-buffer,
106 		 * so return zero here
107 		 */
108 		ret = 0;
109 		goto out;
110 	}
111 
112 	/*
113 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
114 	 * to all call sites, we did a bpf_prog_array_valid() there to check
115 	 * whether call->prog_array is empty or not, which is
116 	 * a heuristic to speed up execution.
117 	 *
118 	 * If bpf_prog_array_valid() fetched prog_array was
119 	 * non-NULL, we go into trace_call_bpf() and do the actual
120 	 * proper rcu_dereference() under RCU lock.
121 	 * If it turns out that prog_array is NULL, we bail out.
122 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
123 	 * was NULL, the call is skipped entirely, at the accepted risk
124 	 * of missing events if prog_array was updated between that check
125 	 * and the rcu_dereference().
126 	 */
127 	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
128 
129  out:
130 	__this_cpu_dec(bpf_prog_active);
131 
132 	return ret;
133 }
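
/*
 * Illustrative sketch (not part of this file): the call-site pattern the
 * comment above refers to. Callers such as the kprobe perf handler first do
 * the cheap bpf_prog_array_valid() check and only then enter
 * trace_call_bpf(). The handler name below is hypothetical.
 */
#if 0
static void example_kprobe_perf_handler(struct trace_event_call *call,
					struct pt_regs *regs)
{
	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;		/* filtered out by BPF, drop the event */

	/* ... otherwise store the kprobe event into the ring buffer ... */
}
#endif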
134 
135 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
136 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
137 {
138 	regs_set_return_value(regs, rc);
139 	override_function_with_return(regs);
140 	return 0;
141 }
142 
143 static const struct bpf_func_proto bpf_override_return_proto = {
144 	.func		= bpf_override_return,
145 	.gpl_only	= true,
146 	.ret_type	= RET_INTEGER,
147 	.arg1_type	= ARG_PTR_TO_CTX,
148 	.arg2_type	= ARG_ANYTHING,
149 };
150 #endif
151 
152 static __always_inline int
153 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
154 {
155 	int ret;
156 
157 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
158 	if (unlikely(ret < 0))
159 		memset(dst, 0, size);
160 	return ret;
161 }
162 
163 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
164 	   const void __user *, unsafe_ptr)
165 {
166 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
167 }
168 
169 const struct bpf_func_proto bpf_probe_read_user_proto = {
170 	.func		= bpf_probe_read_user,
171 	.gpl_only	= true,
172 	.ret_type	= RET_INTEGER,
173 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
174 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
175 	.arg3_type	= ARG_ANYTHING,
176 };
177 
178 static __always_inline int
179 bpf_probe_read_user_str_common(void *dst, u32 size,
180 			       const void __user *unsafe_ptr)
181 {
182 	int ret;
183 
184 	/*
185 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
186 	 * terminator into `dst`.
187 	 *
188 	 * strncpy_from_user() does long-sized strides in the fast path. If the
189 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
190 	 * then there could be junk after the NUL in `dst`. If user takes `dst`
191 	 * and keys a hash map with it, then semantically identical strings can
192 	 * occupy multiple entries in the map.
193 	 */
194 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
195 	if (unlikely(ret < 0))
196 		memset(dst, 0, size);
197 	return ret;
198 }
199 
200 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
201 	   const void __user *, unsafe_ptr)
202 {
203 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
204 }
205 
206 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
207 	.func		= bpf_probe_read_user_str,
208 	.gpl_only	= true,
209 	.ret_type	= RET_INTEGER,
210 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
211 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
212 	.arg3_type	= ARG_ANYTHING,
213 };
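
/*
 * Illustrative sketch (not part of this file): a minimal BPF-side user of
 * bpf_probe_read_user_str(), as it would live in its own .bpf.c object built
 * with libbpf (bpf_helpers.h/bpf_tracing.h, CO-RE vmlinux.h). The attach
 * point and names are illustrative assumptions.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_open")
int BPF_KPROBE(probe_open, int dfd, const char *filename)
{
	char path[64];
	long n;

	/* Copy a NUL-terminated string from the untrusted user pointer. */
	n = bpf_probe_read_user_str(path, sizeof(path), filename);
	if (n < 0)
		return 0;	/* fault while reading user memory */

	bpf_printk("open: %s", path);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif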
214 
215 static __always_inline int
216 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
217 {
218 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
219 
220 	if (unlikely(ret < 0))
221 		goto fail;
222 	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
223 	if (unlikely(ret < 0))
224 		goto fail;
225 	return ret;
226 fail:
227 	memset(dst, 0, size);
228 	return ret;
229 }
230 
231 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
232 	   const void *, unsafe_ptr)
233 {
234 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
235 }
236 
237 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
238 	.func		= bpf_probe_read_kernel,
239 	.gpl_only	= true,
240 	.ret_type	= RET_INTEGER,
241 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
242 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
243 	.arg3_type	= ARG_ANYTHING,
244 };
245 
246 static __always_inline int
247 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
248 {
249 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
250 
251 	if (unlikely(ret < 0))
252 		goto fail;
253 
254 	/*
255 	 * The strncpy_from_kernel_nofault() call will likely not fill the
256 	 * entire buffer, but that's okay in this circumstance as we're probing
257 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
258 	 * as well probe the stack. Thus, memory is explicitly cleared
259 	 * only in error case, so that improper users ignoring return
260 	 * code altogether don't copy garbage; otherwise length of string
261 	 * is returned that can be used for bpf_perf_event_output() et al.
262 	 */
263 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
264 	if (unlikely(ret < 0))
265 		goto fail;
266 
267 	return ret;
268 fail:
269 	memset(dst, 0, size);
270 	return ret;
271 }
272 
273 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
274 	   const void *, unsafe_ptr)
275 {
276 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
277 }
278 
279 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
280 	.func		= bpf_probe_read_kernel_str,
281 	.gpl_only	= true,
282 	.ret_type	= RET_INTEGER,
283 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
284 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
285 	.arg3_type	= ARG_ANYTHING,
286 };
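
/*
 * Illustrative sketch (not part of this file): reading kernel memory from a
 * kprobe with bpf_probe_read_kernel() and bpf_probe_read_kernel_str().
 * Attach point and argument layout are illustrative and vary across kernel
 * versions; real programs usually prefer CO-RE (BPF_CORE_READ()).
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/vfs_unlink")
int BPF_KPROBE(probe_unlink, struct inode *dir, struct dentry *dentry)
{
	const unsigned char *name_ptr;
	char name[32];

	/* Fetch the pointer first, then the string it points to, nofault. */
	bpf_probe_read_kernel(&name_ptr, sizeof(name_ptr),
			      &dentry->d_name.name);
	bpf_probe_read_kernel_str(name, sizeof(name), name_ptr);
	bpf_printk("unlink: %s", name);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif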
287 
288 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
289 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
290 	   const void *, unsafe_ptr)
291 {
292 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
293 		return bpf_probe_read_user_common(dst, size,
294 				(__force void __user *)unsafe_ptr);
295 	}
296 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
297 }
298 
299 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
300 	.func		= bpf_probe_read_compat,
301 	.gpl_only	= true,
302 	.ret_type	= RET_INTEGER,
303 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
304 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
305 	.arg3_type	= ARG_ANYTHING,
306 };
307 
308 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
309 	   const void *, unsafe_ptr)
310 {
311 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
312 		return bpf_probe_read_user_str_common(dst, size,
313 				(__force void __user *)unsafe_ptr);
314 	}
315 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
316 }
317 
318 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
319 	.func		= bpf_probe_read_compat_str,
320 	.gpl_only	= true,
321 	.ret_type	= RET_INTEGER,
322 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
323 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
324 	.arg3_type	= ARG_ANYTHING,
325 };
326 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
327 
328 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
329 	   u32, size)
330 {
331 	/*
332 	 * Ensure we're in user context which is safe for the helper to
333 	 * run. This helper has no business in a kthread.
334 	 *
335 	 * access_ok() should prevent writing to non-user memory, but in
336 	 * some situations (nommu, temporary switch, etc) access_ok() does
337 	 * not provide enough validation, hence the check on KERNEL_DS.
338 	 *
339 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
340 	 * state, when the task or mm are switched. This is specifically
341 	 * required to prevent the use of temporary mm.
342 	 */
343 
344 	if (unlikely(in_interrupt() ||
345 		     current->flags & (PF_KTHREAD | PF_EXITING)))
346 		return -EPERM;
347 	if (unlikely(uaccess_kernel()))
348 		return -EPERM;
349 	if (unlikely(!nmi_uaccess_okay()))
350 		return -EPERM;
351 
352 	return copy_to_user_nofault(unsafe_ptr, src, size);
353 }
354 
355 static const struct bpf_func_proto bpf_probe_write_user_proto = {
356 	.func		= bpf_probe_write_user,
357 	.gpl_only	= true,
358 	.ret_type	= RET_INTEGER,
359 	.arg1_type	= ARG_ANYTHING,
360 	.arg2_type	= ARG_PTR_TO_MEM,
361 	.arg3_type	= ARG_CONST_SIZE,
362 };
363 
364 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
365 {
366 	if (!capable(CAP_SYS_ADMIN))
367 		return NULL;
368 
369 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
370 			    current->comm, task_pid_nr(current));
371 
372 	return &bpf_probe_write_user_proto;
373 }
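
/*
 * Illustrative sketch (not part of this file): bpf_probe_write_user() may
 * only patch the current task's valid user address space, and loading a
 * program that uses it triggers the warning above. The attach point and
 * buffer contents below are purely illustrative.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Overwrite the buffer a task is about to write(2), as a demonstration. */
SEC("kprobe/ksys_write")
int BPF_KPROBE(probe_write_entry, unsigned int fd, char *user_buf)
{
	const char msg[] = "patched";

	/* Fails with -EPERM from kthreads, exiting tasks or unsafe contexts. */
	bpf_probe_write_user(user_buf, msg, sizeof(msg));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif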
374 
375 static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
376 		size_t bufsz)
377 {
378 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
379 
380 	buf[0] = 0;
381 
382 	switch (fmt_ptype) {
383 	case 's':
384 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
385 		if ((unsigned long)unsafe_ptr < TASK_SIZE) {
386 			strncpy_from_user_nofault(buf, user_ptr, bufsz);
387 			break;
388 		}
389 		fallthrough;
390 #endif
391 	case 'k':
392 		strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
393 		break;
394 	case 'u':
395 		strncpy_from_user_nofault(buf, user_ptr, bufsz);
396 		break;
397 	}
398 }
399 
400 static DEFINE_RAW_SPINLOCK(trace_printk_lock);
401 
402 #define BPF_TRACE_PRINTK_SIZE   1024
403 
404 static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
405 {
406 	static char buf[BPF_TRACE_PRINTK_SIZE];
407 	unsigned long flags;
408 	va_list ap;
409 	int ret;
410 
411 	raw_spin_lock_irqsave(&trace_printk_lock, flags);
412 	va_start(ap, fmt);
413 	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
414 	va_end(ap);
415 	/* vsnprintf() will not append null for zero-length strings */
416 	if (ret == 0)
417 		buf[0] = '\0';
418 	trace_bpf_trace_printk(buf);
419 	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
420 
421 	return ret;
422 }
423 
424 /*
425  * Only limited trace_printk() conversion specifiers allowed:
426  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
427  */
428 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
429 	   u64, arg2, u64, arg3)
430 {
431 	int i, mod[3] = {}, fmt_cnt = 0;
432 	char buf[64], fmt_ptype;
433 	void *unsafe_ptr = NULL;
434 	bool str_seen = false;
435 
436 	/*
437 	 * bpf_check()->check_func_arg()->check_stack_boundary()
438 	 * guarantees that fmt points to bpf program stack,
439 	 * fmt_size bytes of it were initialized and fmt_size > 0
440 	 */
441 	if (fmt[--fmt_size] != 0)
442 		return -EINVAL;
443 
444 	/* check format string for allowed specifiers */
445 	for (i = 0; i < fmt_size; i++) {
446 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
447 			return -EINVAL;
448 
449 		if (fmt[i] != '%')
450 			continue;
451 
452 		if (fmt_cnt >= 3)
453 			return -EINVAL;
454 
455 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
456 		i++;
457 		if (fmt[i] == 'l') {
458 			mod[fmt_cnt]++;
459 			i++;
460 		} else if (fmt[i] == 'p') {
461 			mod[fmt_cnt]++;
462 			if ((fmt[i + 1] == 'k' ||
463 			     fmt[i + 1] == 'u') &&
464 			    fmt[i + 2] == 's') {
465 				fmt_ptype = fmt[i + 1];
466 				i += 2;
467 				goto fmt_str;
468 			}
469 
470 			if (fmt[i + 1] == 'B') {
471 				i++;
472 				goto fmt_next;
473 			}
474 
475 			/* disallow any further format extensions */
476 			if (fmt[i + 1] != 0 &&
477 			    !isspace(fmt[i + 1]) &&
478 			    !ispunct(fmt[i + 1]))
479 				return -EINVAL;
480 
481 			goto fmt_next;
482 		} else if (fmt[i] == 's') {
483 			mod[fmt_cnt]++;
484 			fmt_ptype = fmt[i];
485 fmt_str:
486 			if (str_seen)
487 				/* allow only one '%s' per fmt string */
488 				return -EINVAL;
489 			str_seen = true;
490 
491 			if (fmt[i + 1] != 0 &&
492 			    !isspace(fmt[i + 1]) &&
493 			    !ispunct(fmt[i + 1]))
494 				return -EINVAL;
495 
496 			switch (fmt_cnt) {
497 			case 0:
498 				unsafe_ptr = (void *)(long)arg1;
499 				arg1 = (long)buf;
500 				break;
501 			case 1:
502 				unsafe_ptr = (void *)(long)arg2;
503 				arg2 = (long)buf;
504 				break;
505 			case 2:
506 				unsafe_ptr = (void *)(long)arg3;
507 				arg3 = (long)buf;
508 				break;
509 			}
510 
511 			bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
512 					sizeof(buf));
513 			goto fmt_next;
514 		}
515 
516 		if (fmt[i] == 'l') {
517 			mod[fmt_cnt]++;
518 			i++;
519 		}
520 
521 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
522 		    fmt[i] != 'u' && fmt[i] != 'x')
523 			return -EINVAL;
524 fmt_next:
525 		fmt_cnt++;
526 	}
527 
528 /* Horrid workaround for getting va_list handling working with different
529  * argument type combinations generically for 32 and 64 bit archs.
530  */
531 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
532 #define __BPF_TP(...)							\
533 	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
534 
535 #define __BPF_ARG1_TP(...)						\
536 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
537 	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
538 	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
539 	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
540 	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
541 
542 #define __BPF_ARG2_TP(...)						\
543 	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
544 	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
545 	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
546 	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
547 	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
548 
549 #define __BPF_ARG3_TP(...)						\
550 	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
551 	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
552 	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
553 	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
554 	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
555 
556 	return __BPF_TP_EMIT();
557 }
558 
559 static const struct bpf_func_proto bpf_trace_printk_proto = {
560 	.func		= bpf_trace_printk,
561 	.gpl_only	= true,
562 	.ret_type	= RET_INTEGER,
563 	.arg1_type	= ARG_PTR_TO_MEM,
564 	.arg2_type	= ARG_CONST_SIZE,
565 };
566 
567 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
568 {
569 	/*
570 	 * This program might be calling bpf_trace_printk,
571 	 * so enable the associated bpf_trace/bpf_trace_printk event.
572 	 * Repeat this each time as it is possible a user has
573 	 * disabled bpf_trace_printk events.  By loading a program that
574 	 * calls bpf_trace_printk(), however, the user has expressed
575 	 * the intent to see such events.
576 	 */
577 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
578 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
579 
580 	return &bpf_trace_printk_proto;
581 }
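
/*
 * Illustrative sketch (not part of this file): BPF-side usage of
 * bpf_trace_printk() within the limits enforced above (at most three
 * arguments, only one %s, restricted specifiers). Output goes to the
 * bpf_trace/bpf_trace_printk event enabled by the function above, visible
 * e.g. in /sys/kernel/debug/tracing/trace_pipe. Names are illustrative.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int trace_execve(void *ctx)
{
	const char fmt[] = "execve by pid %d uid %u\n";
	__u64 id = bpf_get_current_pid_tgid();
	__u64 uid = bpf_get_current_uid_gid();

	/* fmt must sit on the BPF stack and be NUL terminated (checked above). */
	bpf_trace_printk(fmt, sizeof(fmt), id >> 32, (__u32)uid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif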
582 
583 #define MAX_SEQ_PRINTF_VARARGS		12
584 #define MAX_SEQ_PRINTF_MAX_MEMCPY	6
585 #define MAX_SEQ_PRINTF_STR_LEN		128
586 
587 struct bpf_seq_printf_buf {
588 	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
589 };
590 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
591 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
592 
593 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
594 	   const void *, data, u32, data_len)
595 {
596 	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
597 	int i, buf_used, copy_size, num_args;
598 	u64 params[MAX_SEQ_PRINTF_VARARGS];
599 	struct bpf_seq_printf_buf *bufs;
600 	const u64 *args = data;
601 
602 	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
603 	if (WARN_ON_ONCE(buf_used > 1)) {
604 		err = -EBUSY;
605 		goto out;
606 	}
607 
608 	bufs = this_cpu_ptr(&bpf_seq_printf_buf);
609 
610 	/*
611 	 * bpf_check()->check_func_arg()->check_stack_boundary()
612 	 * guarantees that fmt points to bpf program stack,
613 	 * fmt_size bytes of it were initialized and fmt_size > 0
614 	 */
615 	if (fmt[--fmt_size] != 0)
616 		goto out;
617 
618 	if (data_len & 7)
619 		goto out;
620 
621 	for (i = 0; i < fmt_size; i++) {
622 		if (fmt[i] == '%') {
623 			if (fmt[i + 1] == '%')
624 				i++;
625 			else if (!data || !data_len)
626 				goto out;
627 		}
628 	}
629 
630 	num_args = data_len / 8;
631 
632 	/* check format string for allowed specifiers */
633 	for (i = 0; i < fmt_size; i++) {
634 		/* only printable ascii for now. */
635 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
636 			err = -EINVAL;
637 			goto out;
638 		}
639 
640 		if (fmt[i] != '%')
641 			continue;
642 
643 		if (fmt[i + 1] == '%') {
644 			i++;
645 			continue;
646 		}
647 
648 		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
649 			err = -E2BIG;
650 			goto out;
651 		}
652 
653 		if (fmt_cnt >= num_args) {
654 			err = -EINVAL;
655 			goto out;
656 		}
657 
658 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
659 		i++;
660 
661 		/* skip optional "[0 +-][num]" width formatting field */
662 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
663 		       fmt[i] == ' ')
664 			i++;
665 		if (fmt[i] >= '1' && fmt[i] <= '9') {
666 			i++;
667 			while (fmt[i] >= '0' && fmt[i] <= '9')
668 				i++;
669 		}
670 
671 		if (fmt[i] == 's') {
672 			void *unsafe_ptr;
673 
674 			/* try our best to copy */
675 			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
676 				err = -E2BIG;
677 				goto out;
678 			}
679 
680 			unsafe_ptr = (void *)(long)args[fmt_cnt];
681 			err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
682 					unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
683 			if (err < 0)
684 				bufs->buf[memcpy_cnt][0] = '\0';
685 			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
686 
687 			fmt_cnt++;
688 			memcpy_cnt++;
689 			continue;
690 		}
691 
692 		if (fmt[i] == 'p') {
693 			if (fmt[i + 1] == 0 ||
694 			    fmt[i + 1] == 'K' ||
695 			    fmt[i + 1] == 'x' ||
696 			    fmt[i + 1] == 'B') {
697 				/* just kernel pointers */
698 				params[fmt_cnt] = args[fmt_cnt];
699 				fmt_cnt++;
700 				continue;
701 			}
702 
703 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
704 			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
705 				err = -EINVAL;
706 				goto out;
707 			}
708 			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
709 				err = -EINVAL;
710 				goto out;
711 			}
712 
713 			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
714 				err = -E2BIG;
715 				goto out;
716 			}
717 
718 
719 			copy_size = (fmt[i + 2] == '4') ? 4 : 16;
720 
721 			err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
722 						(void *) (long) args[fmt_cnt],
723 						copy_size);
724 			if (err < 0)
725 				memset(bufs->buf[memcpy_cnt], 0, copy_size);
726 			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
727 
728 			i += 2;
729 			fmt_cnt++;
730 			memcpy_cnt++;
731 			continue;
732 		}
733 
734 		if (fmt[i] == 'l') {
735 			i++;
736 			if (fmt[i] == 'l')
737 				i++;
738 		}
739 
740 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
741 		    fmt[i] != 'u' && fmt[i] != 'x' &&
742 		    fmt[i] != 'X') {
743 			err = -EINVAL;
744 			goto out;
745 		}
746 
747 		params[fmt_cnt] = args[fmt_cnt];
748 		fmt_cnt++;
749 	}
750 
751 	/* We can have at most MAX_SEQ_PRINTF_VARARGS parameters; just give
752 	 * all of them to seq_printf().
753 	 */
754 	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
755 		   params[4], params[5], params[6], params[7], params[8],
756 		   params[9], params[10], params[11]);
757 
758 	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
759 out:
760 	this_cpu_dec(bpf_seq_printf_buf_used);
761 	return err;
762 }
763 
764 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
765 
766 static const struct bpf_func_proto bpf_seq_printf_proto = {
767 	.func		= bpf_seq_printf,
768 	.gpl_only	= true,
769 	.ret_type	= RET_INTEGER,
770 	.arg1_type	= ARG_PTR_TO_BTF_ID,
771 	.arg1_btf_id	= &btf_seq_file_ids[0],
772 	.arg2_type	= ARG_PTR_TO_MEM,
773 	.arg3_type	= ARG_CONST_SIZE,
774 	.arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
775 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
776 };
777 
778 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
779 {
780 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
781 }
782 
783 static const struct bpf_func_proto bpf_seq_write_proto = {
784 	.func		= bpf_seq_write,
785 	.gpl_only	= true,
786 	.ret_type	= RET_INTEGER,
787 	.arg1_type	= ARG_PTR_TO_BTF_ID,
788 	.arg1_btf_id	= &btf_seq_file_ids[0],
789 	.arg2_type	= ARG_PTR_TO_MEM,
790 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
791 };
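
/*
 * Illustrative sketch (not part of this file): bpf_seq_printf() and
 * bpf_seq_write() are only handed out to BPF_TRACE_ITER programs (see
 * tracing_prog_func_proto() below). A minimal task iterator, showing the
 * "array of u64s plus byte length" vararg convention checked above; names
 * follow the usual bpf_iter selftest style and are illustrative.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	char fmt[] = "%8d %s\n";
	__u64 args[2];

	if (!task)
		return 0;

	args[0] = task->tgid;
	args[1] = (__u64)(long)task->comm;
	bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif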
792 
793 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
794 	   u32, btf_ptr_size, u64, flags)
795 {
796 	const struct btf *btf;
797 	s32 btf_id;
798 	int ret;
799 
800 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
801 	if (ret)
802 		return ret;
803 
804 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
805 }
806 
807 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
808 	.func		= bpf_seq_printf_btf,
809 	.gpl_only	= true,
810 	.ret_type	= RET_INTEGER,
811 	.arg1_type	= ARG_PTR_TO_BTF_ID,
812 	.arg1_btf_id	= &btf_seq_file_ids[0],
813 	.arg2_type	= ARG_PTR_TO_MEM,
814 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
815 	.arg4_type	= ARG_ANYTHING,
816 };
817 
818 static __always_inline int
819 get_map_perf_counter(struct bpf_map *map, u64 flags,
820 		     u64 *value, u64 *enabled, u64 *running)
821 {
822 	struct bpf_array *array = container_of(map, struct bpf_array, map);
823 	unsigned int cpu = smp_processor_id();
824 	u64 index = flags & BPF_F_INDEX_MASK;
825 	struct bpf_event_entry *ee;
826 
827 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
828 		return -EINVAL;
829 	if (index == BPF_F_CURRENT_CPU)
830 		index = cpu;
831 	if (unlikely(index >= array->map.max_entries))
832 		return -E2BIG;
833 
834 	ee = READ_ONCE(array->ptrs[index]);
835 	if (!ee)
836 		return -ENOENT;
837 
838 	return perf_event_read_local(ee->event, value, enabled, running);
839 }
840 
841 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
842 {
843 	u64 value = 0;
844 	int err;
845 
846 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
847 	/*
848 	 * this api is ugly since we miss [-22..-2] range of valid
849 	 * counter values, but that's uapi
850 	 */
851 	if (err)
852 		return err;
853 	return value;
854 }
855 
856 static const struct bpf_func_proto bpf_perf_event_read_proto = {
857 	.func		= bpf_perf_event_read,
858 	.gpl_only	= true,
859 	.ret_type	= RET_INTEGER,
860 	.arg1_type	= ARG_CONST_MAP_PTR,
861 	.arg2_type	= ARG_ANYTHING,
862 };
863 
864 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
865 	   struct bpf_perf_event_value *, buf, u32, size)
866 {
867 	int err = -EINVAL;
868 
869 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
870 		goto clear;
871 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
872 				   &buf->running);
873 	if (unlikely(err))
874 		goto clear;
875 	return 0;
876 clear:
877 	memset(buf, 0, size);
878 	return err;
879 }
880 
881 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
882 	.func		= bpf_perf_event_read_value,
883 	.gpl_only	= true,
884 	.ret_type	= RET_INTEGER,
885 	.arg1_type	= ARG_CONST_MAP_PTR,
886 	.arg2_type	= ARG_ANYTHING,
887 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
888 	.arg4_type	= ARG_CONST_SIZE,
889 };
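
/*
 * Illustrative sketch (not part of this file): reading a counter that user
 * space opened with perf_event_open() and stored per CPU in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY. Map name, sizing and attach point are
 * illustrative assumptions.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 64);	/* >= number of possible CPUs */
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} counters SEC(".maps");

SEC("kprobe/do_sys_open")
int read_counter(void *ctx)
{
	struct bpf_perf_event_value v;

	/* Reads the event installed for the current CPU's slot. */
	if (bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
				      &v, sizeof(v)))
		return 0;

	bpf_printk("counter=%llu enabled=%llu", v.counter, v.enabled);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif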
890 
891 static __always_inline u64
892 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
893 			u64 flags, struct perf_sample_data *sd)
894 {
895 	struct bpf_array *array = container_of(map, struct bpf_array, map);
896 	unsigned int cpu = smp_processor_id();
897 	u64 index = flags & BPF_F_INDEX_MASK;
898 	struct bpf_event_entry *ee;
899 	struct perf_event *event;
900 
901 	if (index == BPF_F_CURRENT_CPU)
902 		index = cpu;
903 	if (unlikely(index >= array->map.max_entries))
904 		return -E2BIG;
905 
906 	ee = READ_ONCE(array->ptrs[index]);
907 	if (!ee)
908 		return -ENOENT;
909 
910 	event = ee->event;
911 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
912 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
913 		return -EINVAL;
914 
915 	if (unlikely(event->oncpu != cpu))
916 		return -EOPNOTSUPP;
917 
918 	return perf_event_output(event, sd, regs);
919 }
920 
921 /*
922  * Support executing tracepoints in normal, irq, and nmi context that each call
923  * bpf_perf_event_output
924  */
925 struct bpf_trace_sample_data {
926 	struct perf_sample_data sds[3];
927 };
928 
929 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
930 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
931 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
932 	   u64, flags, void *, data, u64, size)
933 {
934 	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
935 	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
936 	struct perf_raw_record raw = {
937 		.frag = {
938 			.size = size,
939 			.data = data,
940 		},
941 	};
942 	struct perf_sample_data *sd;
943 	int err;
944 
945 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
946 		err = -EBUSY;
947 		goto out;
948 	}
949 
950 	sd = &sds->sds[nest_level - 1];
951 
952 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
953 		err = -EINVAL;
954 		goto out;
955 	}
956 
957 	perf_sample_data_init(sd, 0, 0);
958 	sd->raw = &raw;
959 
960 	err = __bpf_perf_event_output(regs, map, flags, sd);
961 
962 out:
963 	this_cpu_dec(bpf_trace_nest_level);
964 	return err;
965 }
966 
967 static const struct bpf_func_proto bpf_perf_event_output_proto = {
968 	.func		= bpf_perf_event_output,
969 	.gpl_only	= true,
970 	.ret_type	= RET_INTEGER,
971 	.arg1_type	= ARG_PTR_TO_CTX,
972 	.arg2_type	= ARG_CONST_MAP_PTR,
973 	.arg3_type	= ARG_ANYTHING,
974 	.arg4_type	= ARG_PTR_TO_MEM,
975 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
976 };
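
/*
 * Illustrative sketch (not part of this file): streaming an event record to
 * user space through a BPF_MAP_TYPE_PERF_EVENT_ARRAY; user space typically
 * consumes it with libbpf's perf_buffer__new(). Struct layout, map and
 * attach point are illustrative assumptions.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct event {
	__u32 pid;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("kprobe/do_exit")
int on_exit(void *ctx)
{
	struct event e = {};

	e.pid = bpf_get_current_pid_tgid() >> 32;
	bpf_get_current_comm(e.comm, sizeof(e.comm));

	/* Emit on the current CPU's ring; fails if no event is installed there. */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif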
977 
978 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
979 struct bpf_nested_pt_regs {
980 	struct pt_regs regs[3];
981 };
982 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
983 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
984 
985 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
986 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
987 {
988 	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
989 	struct perf_raw_frag frag = {
990 		.copy		= ctx_copy,
991 		.size		= ctx_size,
992 		.data		= ctx,
993 	};
994 	struct perf_raw_record raw = {
995 		.frag = {
996 			{
997 				.next	= ctx_size ? &frag : NULL,
998 			},
999 			.size	= meta_size,
1000 			.data	= meta,
1001 		},
1002 	};
1003 	struct perf_sample_data *sd;
1004 	struct pt_regs *regs;
1005 	u64 ret;
1006 
1007 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
1008 		ret = -EBUSY;
1009 		goto out;
1010 	}
1011 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
1012 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
1013 
1014 	perf_fetch_caller_regs(regs);
1015 	perf_sample_data_init(sd, 0, 0);
1016 	sd->raw = &raw;
1017 
1018 	ret = __bpf_perf_event_output(regs, map, flags, sd);
1019 out:
1020 	this_cpu_dec(bpf_event_output_nest_level);
1021 	return ret;
1022 }
1023 
1024 BPF_CALL_0(bpf_get_current_task)
1025 {
1026 	return (long) current;
1027 }
1028 
1029 const struct bpf_func_proto bpf_get_current_task_proto = {
1030 	.func		= bpf_get_current_task,
1031 	.gpl_only	= true,
1032 	.ret_type	= RET_INTEGER,
1033 };
1034 
1035 BPF_CALL_0(bpf_get_current_task_btf)
1036 {
1037 	return (unsigned long) current;
1038 }
1039 
1040 BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
1041 
1042 static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
1043 	.func		= bpf_get_current_task_btf,
1044 	.gpl_only	= true,
1045 	.ret_type	= RET_PTR_TO_BTF_ID,
1046 	.ret_btf_id	= &bpf_get_current_btf_ids[0],
1047 };
1048 
1049 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
1050 {
1051 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1052 	struct cgroup *cgrp;
1053 
1054 	if (unlikely(idx >= array->map.max_entries))
1055 		return -E2BIG;
1056 
1057 	cgrp = READ_ONCE(array->ptrs[idx]);
1058 	if (unlikely(!cgrp))
1059 		return -EAGAIN;
1060 
1061 	return task_under_cgroup_hierarchy(current, cgrp);
1062 }
1063 
1064 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
1065 	.func           = bpf_current_task_under_cgroup,
1066 	.gpl_only       = false,
1067 	.ret_type       = RET_INTEGER,
1068 	.arg1_type      = ARG_CONST_MAP_PTR,
1069 	.arg2_type      = ARG_ANYTHING,
1070 };
1071 
1072 struct send_signal_irq_work {
1073 	struct irq_work irq_work;
1074 	struct task_struct *task;
1075 	u32 sig;
1076 	enum pid_type type;
1077 };
1078 
1079 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
1080 
1081 static void do_bpf_send_signal(struct irq_work *entry)
1082 {
1083 	struct send_signal_irq_work *work;
1084 
1085 	work = container_of(entry, struct send_signal_irq_work, irq_work);
1086 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
1087 }
1088 
1089 static int bpf_send_signal_common(u32 sig, enum pid_type type)
1090 {
1091 	struct send_signal_irq_work *work = NULL;
1092 
1093 	/* Similar to bpf_probe_write_user, the task needs to be
1094 	 * in a sound condition and kernel memory access must be
1095 	 * permitted in order to send a signal to the current
1096 	 * task.
1097 	 */
1098 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
1099 		return -EPERM;
1100 	if (unlikely(uaccess_kernel()))
1101 		return -EPERM;
1102 	if (unlikely(!nmi_uaccess_okay()))
1103 		return -EPERM;
1104 
1105 	if (irqs_disabled()) {
1106 		/* Do an early check on signal validity. Otherwise,
1107 		 * the error is lost in deferred irq_work.
1108 		 */
1109 		if (unlikely(!valid_signal(sig)))
1110 			return -EINVAL;
1111 
1112 		work = this_cpu_ptr(&send_signal_work);
1113 		if (irq_work_is_busy(&work->irq_work))
1114 			return -EBUSY;
1115 
1116 		/* Add the current task, which is the target of the signal,
1117 		 * to the irq_work. The current task may have changed by the
1118 		 * time the queued irq_work is executed.
1119 		 */
1120 		work->task = current;
1121 		work->sig = sig;
1122 		work->type = type;
1123 		irq_work_queue(&work->irq_work);
1124 		return 0;
1125 	}
1126 
1127 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
1128 }
1129 
1130 BPF_CALL_1(bpf_send_signal, u32, sig)
1131 {
1132 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
1133 }
1134 
1135 static const struct bpf_func_proto bpf_send_signal_proto = {
1136 	.func		= bpf_send_signal,
1137 	.gpl_only	= false,
1138 	.ret_type	= RET_INTEGER,
1139 	.arg1_type	= ARG_ANYTHING,
1140 };
1141 
1142 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
1143 {
1144 	return bpf_send_signal_common(sig, PIDTYPE_PID);
1145 }
1146 
1147 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
1148 	.func		= bpf_send_signal_thread,
1149 	.gpl_only	= false,
1150 	.ret_type	= RET_INTEGER,
1151 	.arg1_type	= ARG_ANYTHING,
1152 };
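
/*
 * Illustrative sketch (not part of this file): sending a signal to the
 * current task from a tracing program. bpf_send_signal() targets the whole
 * thread group, bpf_send_signal_thread() only the calling thread. The attach
 * point is an illustrative assumption.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SIGKILL 9

SEC("kprobe/security_bprm_check")
int deny_exec(void *ctx)
{
	/* May return -EPERM for kthreads, exiting tasks or unsafe contexts. */
	bpf_send_signal(SIGKILL);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif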
1153 
1154 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
1155 {
1156 	long len;
1157 	char *p;
1158 
1159 	if (!sz)
1160 		return 0;
1161 
1162 	p = d_path(path, buf, sz);
1163 	if (IS_ERR(p)) {
1164 		len = PTR_ERR(p);
1165 	} else {
1166 		len = buf + sz - p;
1167 		memmove(buf, p, len);
1168 	}
1169 
1170 	return len;
1171 }
1172 
1173 BTF_SET_START(btf_allowlist_d_path)
1174 #ifdef CONFIG_SECURITY
1175 BTF_ID(func, security_file_permission)
1176 BTF_ID(func, security_inode_getattr)
1177 BTF_ID(func, security_file_open)
1178 #endif
1179 #ifdef CONFIG_SECURITY_PATH
1180 BTF_ID(func, security_path_truncate)
1181 #endif
1182 BTF_ID(func, vfs_truncate)
1183 BTF_ID(func, vfs_fallocate)
1184 BTF_ID(func, dentry_open)
1185 BTF_ID(func, vfs_getattr)
1186 BTF_ID(func, filp_close)
1187 BTF_SET_END(btf_allowlist_d_path)
1188 
1189 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
1190 {
1191 	if (prog->type == BPF_PROG_TYPE_LSM)
1192 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
1193 
1194 	return btf_id_set_contains(&btf_allowlist_d_path,
1195 				   prog->aux->attach_btf_id);
1196 }
1197 
1198 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
1199 
1200 static const struct bpf_func_proto bpf_d_path_proto = {
1201 	.func		= bpf_d_path,
1202 	.gpl_only	= false,
1203 	.ret_type	= RET_INTEGER,
1204 	.arg1_type	= ARG_PTR_TO_BTF_ID,
1205 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
1206 	.arg2_type	= ARG_PTR_TO_MEM,
1207 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1208 	.allowed	= bpf_d_path_allowed,
1209 };
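
/*
 * Illustrative sketch (not part of this file): bpf_d_path() is only allowed
 * at the attach points listed above (plus sleepable LSM hooks), e.g. an
 * fentry program on filp_close(). The buffer size is an illustrative choice.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/filp_close")
int BPF_PROG(trace_close, struct file *file)
{
	char path[256];
	long len;

	len = bpf_d_path(&file->f_path, path, sizeof(path));
	if (len > 0)
		bpf_printk("close: %s", path);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif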
1210 
1211 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
1212 			 BTF_F_PTR_RAW | BTF_F_ZERO)
1213 
1214 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
1215 				  u64 flags, const struct btf **btf,
1216 				  s32 *btf_id)
1217 {
1218 	const struct btf_type *t;
1219 
1220 	if (unlikely(flags & ~(BTF_F_ALL)))
1221 		return -EINVAL;
1222 
1223 	if (btf_ptr_size != sizeof(struct btf_ptr))
1224 		return -EINVAL;
1225 
1226 	*btf = bpf_get_btf_vmlinux();
1227 
1228 	if (IS_ERR_OR_NULL(*btf))
1229 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
1230 
1231 	if (ptr->type_id > 0)
1232 		*btf_id = ptr->type_id;
1233 	else
1234 		return -EINVAL;
1235 
1236 	if (*btf_id > 0)
1237 		t = btf_type_by_id(*btf, *btf_id);
1238 	if (*btf_id <= 0 || !t)
1239 		return -ENOENT;
1240 
1241 	return 0;
1242 }
1243 
1244 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1245 	   u32, btf_ptr_size, u64, flags)
1246 {
1247 	const struct btf *btf;
1248 	s32 btf_id;
1249 	int ret;
1250 
1251 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1252 	if (ret)
1253 		return ret;
1254 
1255 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1256 				      flags);
1257 }
1258 
1259 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1260 	.func		= bpf_snprintf_btf,
1261 	.gpl_only	= false,
1262 	.ret_type	= RET_INTEGER,
1263 	.arg1_type	= ARG_PTR_TO_MEM,
1264 	.arg2_type	= ARG_CONST_SIZE,
1265 	.arg3_type	= ARG_PTR_TO_MEM,
1266 	.arg4_type	= ARG_CONST_SIZE,
1267 	.arg5_type	= ARG_ANYTHING,
1268 };
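
/*
 * Illustrative sketch (not part of this file): pretty-printing a kernel
 * object by BTF type id with bpf_snprintf_btf(). The id is resolved at load
 * time via libbpf's bpf_core_type_id_kernel(); struct btf_ptr comes from
 * uapi <linux/btf.h> (declare it locally if vmlinux.h lacks it). Buffer size
 * and attach point are illustrative assumptions.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

static char out[2048];

SEC("tp_btf/sched_switch")
int BPF_PROG(show_prev, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	struct btf_ptr p = {};

	p.type_id = bpf_core_type_id_kernel(struct task_struct);
	p.ptr = prev;

	/* flags == 0 selects the default multi-line format; see BTF_F_*. */
	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif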
1269 
1270 const struct bpf_func_proto *
1271 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1272 {
1273 	switch (func_id) {
1274 	case BPF_FUNC_map_lookup_elem:
1275 		return &bpf_map_lookup_elem_proto;
1276 	case BPF_FUNC_map_update_elem:
1277 		return &bpf_map_update_elem_proto;
1278 	case BPF_FUNC_map_delete_elem:
1279 		return &bpf_map_delete_elem_proto;
1280 	case BPF_FUNC_map_push_elem:
1281 		return &bpf_map_push_elem_proto;
1282 	case BPF_FUNC_map_pop_elem:
1283 		return &bpf_map_pop_elem_proto;
1284 	case BPF_FUNC_map_peek_elem:
1285 		return &bpf_map_peek_elem_proto;
1286 	case BPF_FUNC_ktime_get_ns:
1287 		return &bpf_ktime_get_ns_proto;
1288 	case BPF_FUNC_ktime_get_boot_ns:
1289 		return &bpf_ktime_get_boot_ns_proto;
1290 	case BPF_FUNC_ktime_get_coarse_ns:
1291 		return &bpf_ktime_get_coarse_ns_proto;
1292 	case BPF_FUNC_tail_call:
1293 		return &bpf_tail_call_proto;
1294 	case BPF_FUNC_get_current_pid_tgid:
1295 		return &bpf_get_current_pid_tgid_proto;
1296 	case BPF_FUNC_get_current_task:
1297 		return &bpf_get_current_task_proto;
1298 	case BPF_FUNC_get_current_task_btf:
1299 		return &bpf_get_current_task_btf_proto;
1300 	case BPF_FUNC_get_current_uid_gid:
1301 		return &bpf_get_current_uid_gid_proto;
1302 	case BPF_FUNC_get_current_comm:
1303 		return &bpf_get_current_comm_proto;
1304 	case BPF_FUNC_trace_printk:
1305 		return bpf_get_trace_printk_proto();
1306 	case BPF_FUNC_get_smp_processor_id:
1307 		return &bpf_get_smp_processor_id_proto;
1308 	case BPF_FUNC_get_numa_node_id:
1309 		return &bpf_get_numa_node_id_proto;
1310 	case BPF_FUNC_perf_event_read:
1311 		return &bpf_perf_event_read_proto;
1312 	case BPF_FUNC_probe_write_user:
1313 		return bpf_get_probe_write_proto();
1314 	case BPF_FUNC_current_task_under_cgroup:
1315 		return &bpf_current_task_under_cgroup_proto;
1316 	case BPF_FUNC_get_prandom_u32:
1317 		return &bpf_get_prandom_u32_proto;
1318 	case BPF_FUNC_probe_read_user:
1319 		return &bpf_probe_read_user_proto;
1320 	case BPF_FUNC_probe_read_kernel:
1321 		return &bpf_probe_read_kernel_proto;
1322 	case BPF_FUNC_probe_read_user_str:
1323 		return &bpf_probe_read_user_str_proto;
1324 	case BPF_FUNC_probe_read_kernel_str:
1325 		return &bpf_probe_read_kernel_str_proto;
1326 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1327 	case BPF_FUNC_probe_read:
1328 		return &bpf_probe_read_compat_proto;
1329 	case BPF_FUNC_probe_read_str:
1330 		return &bpf_probe_read_compat_str_proto;
1331 #endif
1332 #ifdef CONFIG_CGROUPS
1333 	case BPF_FUNC_get_current_cgroup_id:
1334 		return &bpf_get_current_cgroup_id_proto;
1335 #endif
1336 	case BPF_FUNC_send_signal:
1337 		return &bpf_send_signal_proto;
1338 	case BPF_FUNC_send_signal_thread:
1339 		return &bpf_send_signal_thread_proto;
1340 	case BPF_FUNC_perf_event_read_value:
1341 		return &bpf_perf_event_read_value_proto;
1342 	case BPF_FUNC_get_ns_current_pid_tgid:
1343 		return &bpf_get_ns_current_pid_tgid_proto;
1344 	case BPF_FUNC_ringbuf_output:
1345 		return &bpf_ringbuf_output_proto;
1346 	case BPF_FUNC_ringbuf_reserve:
1347 		return &bpf_ringbuf_reserve_proto;
1348 	case BPF_FUNC_ringbuf_submit:
1349 		return &bpf_ringbuf_submit_proto;
1350 	case BPF_FUNC_ringbuf_discard:
1351 		return &bpf_ringbuf_discard_proto;
1352 	case BPF_FUNC_ringbuf_query:
1353 		return &bpf_ringbuf_query_proto;
1354 	case BPF_FUNC_jiffies64:
1355 		return &bpf_jiffies64_proto;
1356 	case BPF_FUNC_get_task_stack:
1357 		return &bpf_get_task_stack_proto;
1358 	case BPF_FUNC_copy_from_user:
1359 		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
1360 	case BPF_FUNC_snprintf_btf:
1361 		return &bpf_snprintf_btf_proto;
1362 	case BPF_FUNC_per_cpu_ptr:
1363 		return &bpf_per_cpu_ptr_proto;
1364 	case BPF_FUNC_this_cpu_ptr:
1365 		return &bpf_this_cpu_ptr_proto;
1366 	default:
1367 		return NULL;
1368 	}
1369 }
1370 
1371 static const struct bpf_func_proto *
1372 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1373 {
1374 	switch (func_id) {
1375 	case BPF_FUNC_perf_event_output:
1376 		return &bpf_perf_event_output_proto;
1377 	case BPF_FUNC_get_stackid:
1378 		return &bpf_get_stackid_proto;
1379 	case BPF_FUNC_get_stack:
1380 		return &bpf_get_stack_proto;
1381 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1382 	case BPF_FUNC_override_return:
1383 		return &bpf_override_return_proto;
1384 #endif
1385 	default:
1386 		return bpf_tracing_func_proto(func_id, prog);
1387 	}
1388 }
1389 
1390 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1391 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1392 					const struct bpf_prog *prog,
1393 					struct bpf_insn_access_aux *info)
1394 {
1395 	if (off < 0 || off >= sizeof(struct pt_regs))
1396 		return false;
1397 	if (type != BPF_READ)
1398 		return false;
1399 	if (off % size != 0)
1400 		return false;
1401 	/*
1402 	 * Assertion for 32 bit to make sure last 8 byte access
1403 	 * (BPF_DW) to the last 4 byte member is disallowed.
1404 	 */
1405 	if (off + size > sizeof(struct pt_regs))
1406 		return false;
1407 
1408 	return true;
1409 }
1410 
1411 const struct bpf_verifier_ops kprobe_verifier_ops = {
1412 	.get_func_proto  = kprobe_prog_func_proto,
1413 	.is_valid_access = kprobe_prog_is_valid_access,
1414 };
1415 
1416 const struct bpf_prog_ops kprobe_prog_ops = {
1417 };
1418 
1419 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1420 	   u64, flags, void *, data, u64, size)
1421 {
1422 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1423 
1424 	/*
1425 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1426 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1427 	 * from there and call the same bpf_perf_event_output() helper inline.
1428 	 */
1429 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1430 }
1431 
1432 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1433 	.func		= bpf_perf_event_output_tp,
1434 	.gpl_only	= true,
1435 	.ret_type	= RET_INTEGER,
1436 	.arg1_type	= ARG_PTR_TO_CTX,
1437 	.arg2_type	= ARG_CONST_MAP_PTR,
1438 	.arg3_type	= ARG_ANYTHING,
1439 	.arg4_type	= ARG_PTR_TO_MEM,
1440 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1441 };
1442 
1443 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1444 	   u64, flags)
1445 {
1446 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1447 
1448 	/*
1449 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1450 	 * the other helper's function body cannot be inlined due to being
1451 	 * external, thus we need to call raw helper function.
1452 	 */
1453 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1454 			       flags, 0, 0);
1455 }
1456 
1457 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1458 	.func		= bpf_get_stackid_tp,
1459 	.gpl_only	= true,
1460 	.ret_type	= RET_INTEGER,
1461 	.arg1_type	= ARG_PTR_TO_CTX,
1462 	.arg2_type	= ARG_CONST_MAP_PTR,
1463 	.arg3_type	= ARG_ANYTHING,
1464 };
1465 
1466 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1467 	   u64, flags)
1468 {
1469 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1470 
1471 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1472 			     (unsigned long) size, flags, 0);
1473 }
1474 
1475 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1476 	.func		= bpf_get_stack_tp,
1477 	.gpl_only	= true,
1478 	.ret_type	= RET_INTEGER,
1479 	.arg1_type	= ARG_PTR_TO_CTX,
1480 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1481 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1482 	.arg4_type	= ARG_ANYTHING,
1483 };
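
/*
 * Illustrative sketch (not part of this file): collecting kernel stack
 * traces from a tracepoint program with bpf_get_stackid() and a
 * BPF_MAP_TYPE_STACK_TRACE map. Map sizing and the chosen tracepoint are
 * illustrative assumptions.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH 127

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
} stacks SEC(".maps");

SEC("tracepoint/sched/sched_switch")
int on_switch(void *ctx)
{
	/* flags == 0 captures the kernel stack for the current context. */
	long id = bpf_get_stackid(ctx, &stacks, 0);

	if (id >= 0)
		bpf_printk("stackid=%ld", id);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif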
1484 
1485 static const struct bpf_func_proto *
1486 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1487 {
1488 	switch (func_id) {
1489 	case BPF_FUNC_perf_event_output:
1490 		return &bpf_perf_event_output_proto_tp;
1491 	case BPF_FUNC_get_stackid:
1492 		return &bpf_get_stackid_proto_tp;
1493 	case BPF_FUNC_get_stack:
1494 		return &bpf_get_stack_proto_tp;
1495 	default:
1496 		return bpf_tracing_func_proto(func_id, prog);
1497 	}
1498 }
1499 
1500 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1501 				    const struct bpf_prog *prog,
1502 				    struct bpf_insn_access_aux *info)
1503 {
1504 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1505 		return false;
1506 	if (type != BPF_READ)
1507 		return false;
1508 	if (off % size != 0)
1509 		return false;
1510 
1511 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1512 	return true;
1513 }
1514 
1515 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1516 	.get_func_proto  = tp_prog_func_proto,
1517 	.is_valid_access = tp_prog_is_valid_access,
1518 };
1519 
1520 const struct bpf_prog_ops tracepoint_prog_ops = {
1521 };
1522 
1523 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1524 	   struct bpf_perf_event_value *, buf, u32, size)
1525 {
1526 	int err = -EINVAL;
1527 
1528 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1529 		goto clear;
1530 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1531 				    &buf->running);
1532 	if (unlikely(err))
1533 		goto clear;
1534 	return 0;
1535 clear:
1536 	memset(buf, 0, size);
1537 	return err;
1538 }
1539 
1540 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1541 	.func		= bpf_perf_prog_read_value,
1542 	.gpl_only	= true,
1543 	.ret_type	= RET_INTEGER,
1544 	.arg1_type	= ARG_PTR_TO_CTX,
1545 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1546 	.arg3_type	= ARG_CONST_SIZE,
1547 };
1548 
1549 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1550 	   void *, buf, u32, size, u64, flags)
1551 {
1552 #ifndef CONFIG_X86
1553 	return -ENOENT;
1554 #else
1555 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1556 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1557 	u32 to_copy;
1558 
1559 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1560 		return -EINVAL;
1561 
1562 	if (unlikely(!br_stack))
1563 		return -EINVAL;
1564 
1565 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1566 		return br_stack->nr * br_entry_size;
1567 
1568 	if (!buf || (size % br_entry_size != 0))
1569 		return -EINVAL;
1570 
1571 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1572 	memcpy(buf, br_stack->entries, to_copy);
1573 
1574 	return to_copy;
1575 #endif
1576 }
1577 
1578 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1579 	.func           = bpf_read_branch_records,
1580 	.gpl_only       = true,
1581 	.ret_type       = RET_INTEGER,
1582 	.arg1_type      = ARG_PTR_TO_CTX,
1583 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1584 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1585 	.arg4_type      = ARG_ANYTHING,
1586 };
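
/*
 * Illustrative sketch (not part of this file): a perf_event program reading
 * the branch records (x86 LBR) captured for its sampling event; user space
 * must open the event with branch sampling enabled. Buffer sizing is an
 * illustrative assumption (it must be a multiple of
 * sizeof(struct perf_branch_entry), as checked above).
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Raw space for struct perf_branch_entry records (768 = 32 * 24 bytes). */
__u64 lbr_buf[96];

SEC("perf_event")
int on_sample(void *ctx)
{
	long total, copied;

	/* First ask for the available size, then copy the records. */
	total = bpf_read_branch_records(ctx, NULL, 0,
					BPF_F_GET_BRANCH_RECORDS_SIZE);
	copied = bpf_read_branch_records(ctx, lbr_buf, sizeof(lbr_buf), 0);
	if (copied > 0)
		bpf_printk("copied %ld of %ld bytes", copied, total);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif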
1587 
1588 static const struct bpf_func_proto *
1589 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1590 {
1591 	switch (func_id) {
1592 	case BPF_FUNC_perf_event_output:
1593 		return &bpf_perf_event_output_proto_tp;
1594 	case BPF_FUNC_get_stackid:
1595 		return &bpf_get_stackid_proto_pe;
1596 	case BPF_FUNC_get_stack:
1597 		return &bpf_get_stack_proto_pe;
1598 	case BPF_FUNC_perf_prog_read_value:
1599 		return &bpf_perf_prog_read_value_proto;
1600 	case BPF_FUNC_read_branch_records:
1601 		return &bpf_read_branch_records_proto;
1602 	default:
1603 		return bpf_tracing_func_proto(func_id, prog);
1604 	}
1605 }
1606 
1607 /*
1608  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1609  * to avoid potential recursive reuse issue when/if tracepoints are added
1610  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1611  *
1612  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1613  * in normal, irq, and nmi context.
1614  */
1615 struct bpf_raw_tp_regs {
1616 	struct pt_regs regs[3];
1617 };
1618 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1619 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1620 static struct pt_regs *get_bpf_raw_tp_regs(void)
1621 {
1622 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1623 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1624 
1625 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1626 		this_cpu_dec(bpf_raw_tp_nest_level);
1627 		return ERR_PTR(-EBUSY);
1628 	}
1629 
1630 	return &tp_regs->regs[nest_level - 1];
1631 }
1632 
1633 static void put_bpf_raw_tp_regs(void)
1634 {
1635 	this_cpu_dec(bpf_raw_tp_nest_level);
1636 }
1637 
1638 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1639 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1640 {
1641 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1642 	int ret;
1643 
1644 	if (IS_ERR(regs))
1645 		return PTR_ERR(regs);
1646 
1647 	perf_fetch_caller_regs(regs);
1648 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1649 
1650 	put_bpf_raw_tp_regs();
1651 	return ret;
1652 }
1653 
1654 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1655 	.func		= bpf_perf_event_output_raw_tp,
1656 	.gpl_only	= true,
1657 	.ret_type	= RET_INTEGER,
1658 	.arg1_type	= ARG_PTR_TO_CTX,
1659 	.arg2_type	= ARG_CONST_MAP_PTR,
1660 	.arg3_type	= ARG_ANYTHING,
1661 	.arg4_type	= ARG_PTR_TO_MEM,
1662 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1663 };
1664 
1665 extern const struct bpf_func_proto bpf_skb_output_proto;
1666 extern const struct bpf_func_proto bpf_xdp_output_proto;
1667 
1668 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1669 	   struct bpf_map *, map, u64, flags)
1670 {
1671 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1672 	int ret;
1673 
1674 	if (IS_ERR(regs))
1675 		return PTR_ERR(regs);
1676 
1677 	perf_fetch_caller_regs(regs);
1678 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1679 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1680 			      flags, 0, 0);
1681 	put_bpf_raw_tp_regs();
1682 	return ret;
1683 }
1684 
1685 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1686 	.func		= bpf_get_stackid_raw_tp,
1687 	.gpl_only	= true,
1688 	.ret_type	= RET_INTEGER,
1689 	.arg1_type	= ARG_PTR_TO_CTX,
1690 	.arg2_type	= ARG_CONST_MAP_PTR,
1691 	.arg3_type	= ARG_ANYTHING,
1692 };
1693 
1694 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1695 	   void *, buf, u32, size, u64, flags)
1696 {
1697 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1698 	int ret;
1699 
1700 	if (IS_ERR(regs))
1701 		return PTR_ERR(regs);
1702 
1703 	perf_fetch_caller_regs(regs);
1704 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1705 			    (unsigned long) size, flags, 0);
1706 	put_bpf_raw_tp_regs();
1707 	return ret;
1708 }
1709 
1710 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1711 	.func		= bpf_get_stack_raw_tp,
1712 	.gpl_only	= true,
1713 	.ret_type	= RET_INTEGER,
1714 	.arg1_type	= ARG_PTR_TO_CTX,
1715 	.arg2_type	= ARG_PTR_TO_MEM,
1716 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1717 	.arg4_type	= ARG_ANYTHING,
1718 };
1719 
1720 static const struct bpf_func_proto *
1721 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1722 {
1723 	switch (func_id) {
1724 	case BPF_FUNC_perf_event_output:
1725 		return &bpf_perf_event_output_proto_raw_tp;
1726 	case BPF_FUNC_get_stackid:
1727 		return &bpf_get_stackid_proto_raw_tp;
1728 	case BPF_FUNC_get_stack:
1729 		return &bpf_get_stack_proto_raw_tp;
1730 	default:
1731 		return bpf_tracing_func_proto(func_id, prog);
1732 	}
1733 }
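
/*
 * Illustrative sketch (not part of this file): a raw tracepoint program. Its
 * context is just the raw u64 argument array, and the perf output / stack
 * helpers resolve to the *_raw_tp variants above, which synthesize pt_regs
 * via perf_fetch_caller_regs(). Tracepoint and names are illustrative.
 */
#if 0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("raw_tp/sched_process_exec")
int on_exec(struct bpf_raw_tracepoint_args *ctx)
{
	/* args[] carries the tracepoint arguments as untyped u64 values. */
	struct task_struct *p = (struct task_struct *)ctx->args[0];
	pid_t pid;

	/* Pointers are untyped here, so go through the probe-read helper. */
	bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
	bpf_printk("exec in pid %d", pid);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif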
1734 
1735 const struct bpf_func_proto *
1736 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1737 {
1738 	switch (func_id) {
1739 #ifdef CONFIG_NET
1740 	case BPF_FUNC_skb_output:
1741 		return &bpf_skb_output_proto;
1742 	case BPF_FUNC_xdp_output:
1743 		return &bpf_xdp_output_proto;
1744 	case BPF_FUNC_skc_to_tcp6_sock:
1745 		return &bpf_skc_to_tcp6_sock_proto;
1746 	case BPF_FUNC_skc_to_tcp_sock:
1747 		return &bpf_skc_to_tcp_sock_proto;
1748 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1749 		return &bpf_skc_to_tcp_timewait_sock_proto;
1750 	case BPF_FUNC_skc_to_tcp_request_sock:
1751 		return &bpf_skc_to_tcp_request_sock_proto;
1752 	case BPF_FUNC_skc_to_udp6_sock:
1753 		return &bpf_skc_to_udp6_sock_proto;
1754 	case BPF_FUNC_sk_storage_get:
1755 		return &bpf_sk_storage_get_tracing_proto;
1756 	case BPF_FUNC_sk_storage_delete:
1757 		return &bpf_sk_storage_delete_tracing_proto;
1758 	case BPF_FUNC_sock_from_file:
1759 		return &bpf_sock_from_file_proto;
1760 #endif
1761 	case BPF_FUNC_seq_printf:
1762 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1763 		       &bpf_seq_printf_proto :
1764 		       NULL;
1765 	case BPF_FUNC_seq_write:
1766 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1767 		       &bpf_seq_write_proto :
1768 		       NULL;
1769 	case BPF_FUNC_seq_printf_btf:
1770 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1771 		       &bpf_seq_printf_btf_proto :
1772 		       NULL;
1773 	case BPF_FUNC_d_path:
1774 		return &bpf_d_path_proto;
1775 	default:
1776 		return raw_tp_prog_func_proto(func_id, prog);
1777 	}
1778 }
1779 
1780 static bool raw_tp_prog_is_valid_access(int off, int size,
1781 					enum bpf_access_type type,
1782 					const struct bpf_prog *prog,
1783 					struct bpf_insn_access_aux *info)
1784 {
1785 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1786 		return false;
1787 	if (type != BPF_READ)
1788 		return false;
1789 	if (off % size != 0)
1790 		return false;
1791 	return true;
1792 }
1793 
1794 static bool tracing_prog_is_valid_access(int off, int size,
1795 					 enum bpf_access_type type,
1796 					 const struct bpf_prog *prog,
1797 					 struct bpf_insn_access_aux *info)
1798 {
1799 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1800 		return false;
1801 	if (type != BPF_READ)
1802 		return false;
1803 	if (off % size != 0)
1804 		return false;
1805 	return btf_ctx_access(off, size, type, prog, info);
1806 }
1807 
1808 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1809 				     const union bpf_attr *kattr,
1810 				     union bpf_attr __user *uattr)
1811 {
1812 	return -ENOTSUPP;
1813 }
1814 
1815 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1816 	.get_func_proto  = raw_tp_prog_func_proto,
1817 	.is_valid_access = raw_tp_prog_is_valid_access,
1818 };
1819 
1820 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1821 #ifdef CONFIG_NET
1822 	.test_run = bpf_prog_test_run_raw_tp,
1823 #endif
1824 };
1825 
1826 const struct bpf_verifier_ops tracing_verifier_ops = {
1827 	.get_func_proto  = tracing_prog_func_proto,
1828 	.is_valid_access = tracing_prog_is_valid_access,
1829 };
1830 
1831 const struct bpf_prog_ops tracing_prog_ops = {
1832 	.test_run = bpf_prog_test_run_tracing,
1833 };
1834 
1835 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1836 						 enum bpf_access_type type,
1837 						 const struct bpf_prog *prog,
1838 						 struct bpf_insn_access_aux *info)
1839 {
1840 	if (off == 0) {
1841 		if (size != sizeof(u64) || type != BPF_READ)
1842 			return false;
1843 		info->reg_type = PTR_TO_TP_BUFFER;
1844 	}
1845 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1846 }
1847 
1848 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1849 	.get_func_proto  = raw_tp_prog_func_proto,
1850 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1851 };
1852 
1853 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1854 };
1855 
1856 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1857 				    const struct bpf_prog *prog,
1858 				    struct bpf_insn_access_aux *info)
1859 {
1860 	const int size_u64 = sizeof(u64);
1861 
1862 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1863 		return false;
1864 	if (type != BPF_READ)
1865 		return false;
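	/*
	 * Allow one exception to the alignment rule: on 32-bit kernels an
	 * 8-byte field of struct bpf_perf_event_data may end up only
	 * 4-byte aligned, so permit an 8-byte read at off % 8 == 4 there.
	 */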
1866 	if (off % size != 0) {
1867 		if (sizeof(unsigned long) != 4)
1868 			return false;
1869 		if (size != 8)
1870 			return false;
1871 		if (off % size != 4)
1872 			return false;
1873 	}
1874 
1875 	switch (off) {
1876 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1877 		bpf_ctx_record_field_size(info, size_u64);
1878 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1879 			return false;
1880 		break;
1881 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1882 		bpf_ctx_record_field_size(info, size_u64);
1883 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1884 			return false;
1885 		break;
1886 	default:
1887 		if (size != sizeof(long))
1888 			return false;
1889 	}
1890 
1891 	return true;
1892 }
1893 
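/*
 * Accesses to struct bpf_perf_event_data are rewritten into loads from
 * the real struct bpf_perf_event_data_kern.  For example, a read of
 * ctx->sample_period becomes two loads: first the ->data pointer, then
 * perf_sample_data.period at its kernel offset.  All other offsets are
 * treated as reads of the saved registers via the ->regs pointer.
 */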
1894 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1895 				      const struct bpf_insn *si,
1896 				      struct bpf_insn *insn_buf,
1897 				      struct bpf_prog *prog, u32 *target_size)
1898 {
1899 	struct bpf_insn *insn = insn_buf;
1900 
1901 	switch (si->off) {
1902 	case offsetof(struct bpf_perf_event_data, sample_period):
1903 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1904 						       data), si->dst_reg, si->src_reg,
1905 				      offsetof(struct bpf_perf_event_data_kern, data));
1906 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1907 				      bpf_target_off(struct perf_sample_data, period, 8,
1908 						     target_size));
1909 		break;
1910 	case offsetof(struct bpf_perf_event_data, addr):
1911 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1912 						       data), si->dst_reg, si->src_reg,
1913 				      offsetof(struct bpf_perf_event_data_kern, data));
1914 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1915 				      bpf_target_off(struct perf_sample_data, addr, 8,
1916 						     target_size));
1917 		break;
1918 	default:
1919 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1920 						       regs), si->dst_reg, si->src_reg,
1921 				      offsetof(struct bpf_perf_event_data_kern, regs));
1922 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1923 				      si->off);
1924 		break;
1925 	}
1926 
1927 	return insn - insn_buf;
1928 }
1929 
1930 const struct bpf_verifier_ops perf_event_verifier_ops = {
1931 	.get_func_proto		= pe_prog_func_proto,
1932 	.is_valid_access	= pe_prog_is_valid_access,
1933 	.convert_ctx_access	= pe_prog_convert_ctx_access,
1934 };
1935 
1936 const struct bpf_prog_ops perf_event_prog_ops = {
1937 };
1938 
1939 static DEFINE_MUTEX(bpf_event_mutex);
1940 
1941 #define BPF_TRACE_MAX_PROGS 64
1942 
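/*
 * Attach a BPF program to a perf event (typically reached via the
 * PERF_EVENT_IOC_SET_BPF ioctl).  The tracepoint's prog_array is updated
 * RCU-style: copy the old array with the new program appended, publish
 * it with rcu_assign_pointer(), then free the old copy.
 */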
1943 int perf_event_attach_bpf_prog(struct perf_event *event,
1944 			       struct bpf_prog *prog)
1945 {
1946 	struct bpf_prog_array *old_array;
1947 	struct bpf_prog_array *new_array;
1948 	int ret = -EEXIST;
1949 
1950 	/*
1951 	 * Kprobe override only works if the kprobe is on the function entry,
1952 	 * and only if the function is on the error-injection opt-in list.
1953 	 */
1954 	if (prog->kprobe_override &&
1955 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1956 	     !trace_kprobe_error_injectable(event->tp_event)))
1957 		return -EINVAL;
1958 
1959 	mutex_lock(&bpf_event_mutex);
1960 
1961 	if (event->prog)
1962 		goto unlock;
1963 
1964 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1965 	if (old_array &&
1966 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1967 		ret = -E2BIG;
1968 		goto unlock;
1969 	}
1970 
1971 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1972 	if (ret < 0)
1973 		goto unlock;
1974 
1975 	/* set event->prog and publish the new array in event->tp_event->prog_array */
1976 	event->prog = prog;
1977 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1978 	bpf_prog_array_free(old_array);
1979 
1980 unlock:
1981 	mutex_unlock(&bpf_event_mutex);
1982 	return ret;
1983 }
1984 
1985 void perf_event_detach_bpf_prog(struct perf_event *event)
1986 {
1987 	struct bpf_prog_array *old_array;
1988 	struct bpf_prog_array *new_array;
1989 	int ret;
1990 
1991 	mutex_lock(&bpf_event_mutex);
1992 
1993 	if (!event->prog)
1994 		goto unlock;
1995 
1996 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1997 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1998 	if (ret == -ENOENT)
1999 		goto unlock;
2000 	if (ret < 0) {
2001 		bpf_prog_array_delete_safe(old_array, event->prog);
2002 	} else {
2003 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2004 		bpf_prog_array_free(old_array);
2005 	}
2006 
2007 	bpf_prog_put(event->prog);
2008 	event->prog = NULL;
2009 
2010 unlock:
2011 	mutex_unlock(&bpf_event_mutex);
2012 }
2013 
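/*
 * Handler for the PERF_EVENT_IOC_QUERY_BPF ioctl.  A minimal sketch of
 * the expected userspace usage (error handling omitted; "pfd" is assumed
 * to be a tracepoint perf event fd):
 *
 *	struct perf_event_query_bpf *query;
 *
 *	query = calloc(1, sizeof(*query) + sizeof(__u32) * 64);
 *	query->ids_len = 64;
 *	if (ioctl(pfd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
 *		printf("%u programs attached\n", query->prog_cnt);
 */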
2014 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2015 {
2016 	struct perf_event_query_bpf __user *uquery = info;
2017 	struct perf_event_query_bpf query = {};
2018 	struct bpf_prog_array *progs;
2019 	u32 *ids, prog_cnt, ids_len;
2020 	int ret;
2021 
2022 	if (!perfmon_capable())
2023 		return -EPERM;
2024 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2025 		return -EINVAL;
2026 	if (copy_from_user(&query, uquery, sizeof(query)))
2027 		return -EFAULT;
2028 
2029 	ids_len = query.ids_len;
2030 	if (ids_len > BPF_TRACE_MAX_PROGS)
2031 		return -E2BIG;
2032 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2033 	if (!ids)
2034 		return -ENOMEM;
2035 	/*
2036 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2037 	 * is required when the user only wants to check uquery->prog_cnt.
2038 	 * There is no need to check for it here since that case is handled
2039 	 * gracefully by bpf_prog_array_copy_info.
2040 	 */
2041 
2042 	mutex_lock(&bpf_event_mutex);
2043 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2044 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2045 	mutex_unlock(&bpf_event_mutex);
2046 
2047 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2048 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2049 		ret = -EFAULT;
2050 
2051 	kfree(ids);
2052 	return ret;
2053 }
2054 
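/*
 * __start__bpf_raw_tp/__stop__bpf_raw_tp are linker-generated symbols
 * bounding the section that collects every built-in bpf_raw_event_map,
 * so built-in raw tracepoints can be looked up by name below.
 */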
2055 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2056 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2057 
2058 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2059 {
2060 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2061 
2062 	for (; btp < __stop__bpf_raw_tp; btp++) {
2063 		if (!strcmp(btp->tp->name, name))
2064 			return btp;
2065 	}
2066 
2067 	return bpf_get_raw_tracepoint_module(name);
2068 }
2069 
2070 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2071 {
2072 	struct module *mod;
2073 
2074 	preempt_disable();
2075 	mod = __module_address((unsigned long)btp);
2076 	module_put(mod);
2077 	preempt_enable();
2078 }
2079 
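/*
 * Run a raw tracepoint program.  Callers must not sleep: the program is
 * executed under rcu_read_lock() with the tracepoint arguments already
 * marshalled into a u64 array by the bpf_trace_run<N>() stubs below.
 */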
2080 static __always_inline
2081 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2082 {
2083 	cant_sleep();
2084 	rcu_read_lock();
2085 	(void) BPF_PROG_RUN(prog, args);
2086 	rcu_read_unlock();
2087 }
2088 
2089 #define UNPACK(...)			__VA_ARGS__
2090 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2091 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2092 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2093 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2094 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2095 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2096 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2097 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2098 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2099 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2100 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2101 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2102 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2103 
2104 #define SARG(X)		u64 arg##X
2105 #define COPY(X)		args[X] = arg##X
2106 
2107 #define __DL_COM	(,)
2108 #define __DL_SEM	(;)
2109 
2110 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2111 
2112 #define BPF_TRACE_DEFN_x(x)						\
2113 	void bpf_trace_run##x(struct bpf_prog *prog,			\
2114 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2115 	{								\
2116 		u64 args[x];						\
2117 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2118 		__bpf_trace_run(prog, args);				\
2119 	}								\
2120 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
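/*
 * For example, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2)
 */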
2121 BPF_TRACE_DEFN_x(1);
2122 BPF_TRACE_DEFN_x(2);
2123 BPF_TRACE_DEFN_x(3);
2124 BPF_TRACE_DEFN_x(4);
2125 BPF_TRACE_DEFN_x(5);
2126 BPF_TRACE_DEFN_x(6);
2127 BPF_TRACE_DEFN_x(7);
2128 BPF_TRACE_DEFN_x(8);
2129 BPF_TRACE_DEFN_x(9);
2130 BPF_TRACE_DEFN_x(10);
2131 BPF_TRACE_DEFN_x(11);
2132 BPF_TRACE_DEFN_x(12);
2133 
2134 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2135 {
2136 	struct tracepoint *tp = btp->tp;
2137 
2138 	/*
2139 	 * check that the program doesn't access arguments beyond what's
2140 	 * available in this tracepoint
2141 	 */
2142 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2143 		return -EINVAL;
2144 
2145 	if (prog->aux->max_tp_access > btp->writable_size)
2146 		return -EINVAL;
2147 
2148 	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
2149 }
2150 
2151 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2152 {
2153 	return __bpf_probe_register(btp, prog);
2154 }
2155 
2156 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2157 {
2158 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2159 }
2160 
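/*
 * Report which BPF program is attached to a perf event and where it is
 * attached (tracepoint, kprobe or uprobe); used to service the
 * BPF_TASK_FD_QUERY bpf(2) command.
 */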
2161 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2162 			    u32 *fd_type, const char **buf,
2163 			    u64 *probe_offset, u64 *probe_addr)
2164 {
2165 	bool is_tracepoint, is_syscall_tp;
2166 	struct bpf_prog *prog;
2167 	int flags, err = 0;
2168 
2169 	prog = event->prog;
2170 	if (!prog)
2171 		return -ENOENT;
2172 
2173 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2174 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2175 		return -EOPNOTSUPP;
2176 
2177 	*prog_id = prog->aux->id;
2178 	flags = event->tp_event->flags;
2179 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2180 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2181 
2182 	if (is_tracepoint || is_syscall_tp) {
2183 		*buf = is_tracepoint ? event->tp_event->tp->name
2184 				     : event->tp_event->name;
2185 		*fd_type = BPF_FD_TYPE_TRACEPOINT;
2186 		*probe_offset = 0x0;
2187 		*probe_addr = 0x0;
2188 	} else {
2189 		/* kprobe/uprobe */
2190 		err = -EOPNOTSUPP;
2191 #ifdef CONFIG_KPROBE_EVENTS
2192 		if (flags & TRACE_EVENT_FL_KPROBE)
2193 			err = bpf_get_kprobe_info(event, fd_type, buf,
2194 						  probe_offset, probe_addr,
2195 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2196 #endif
2197 #ifdef CONFIG_UPROBE_EVENTS
2198 		if (flags & TRACE_EVENT_FL_UPROBE)
2199 			err = bpf_get_uprobe_info(event, fd_type, buf,
2200 						  probe_offset,
2201 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2202 #endif
2203 	}
2204 
2205 	return err;
2206 }
2207 
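/*
 * Set up the per-CPU irq_work used by the bpf_send_signal helpers to
 * defer signal delivery out of NMI/IRQ context.
 */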
2208 static int __init send_signal_irq_work_init(void)
2209 {
2210 	int cpu;
2211 	struct send_signal_irq_work *work;
2212 
2213 	for_each_possible_cpu(cpu) {
2214 		work = per_cpu_ptr(&send_signal_work, cpu);
2215 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2216 	}
2217 	return 0;
2218 }
2219 
2220 subsys_initcall(send_signal_irq_work_init);
2221 
2222 #ifdef CONFIG_MODULES
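/*
 * Module notifier: keep bpf_trace_modules in sync with loaded modules
 * that carry raw tracepoints, so their bpf_raw_event_map tables can be
 * searched when a program is attached by tracepoint name.
 */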
2223 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2224 			    void *module)
2225 {
2226 	struct bpf_trace_module *btm, *tmp;
2227 	struct module *mod = module;
2228 	int ret = 0;
2229 
2230 	if (mod->num_bpf_raw_events == 0 ||
2231 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2232 		goto out;
2233 
2234 	mutex_lock(&bpf_module_mutex);
2235 
2236 	switch (op) {
2237 	case MODULE_STATE_COMING:
2238 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2239 		if (btm) {
2240 			btm->module = mod;
2241 			list_add(&btm->list, &bpf_trace_modules);
2242 		} else {
2243 			ret = -ENOMEM;
2244 		}
2245 		break;
2246 	case MODULE_STATE_GOING:
2247 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2248 			if (btm->module == mod) {
2249 				list_del(&btm->list);
2250 				kfree(btm);
2251 				break;
2252 			}
2253 		}
2254 		break;
2255 	}
2256 
2257 	mutex_unlock(&bpf_module_mutex);
2258 
2259 out:
2260 	return notifier_from_errno(ret);
2261 }
2262 
2263 static struct notifier_block bpf_module_nb = {
2264 	.notifier_call = bpf_event_notify,
2265 };
2266 
2267 static int __init bpf_event_init(void)
2268 {
2269 	register_module_notifier(&bpf_module_nb);
2270 	return 0;
2271 }
2272 
2273 fs_initcall(bpf_event_init);
2274 #endif /* CONFIG_MODULES */
2275