xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 9ac17575)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_perf_event.h>
10 #include <linux/filter.h>
11 #include <linux/uaccess.h>
12 #include <linux/ctype.h>
13 #include <linux/kprobes.h>
14 #include <linux/syscalls.h>
15 #include <linux/error-injection.h>
16 
17 #include <asm/tlb.h>
18 
19 #include "trace_probe.h"
20 #include "trace.h"
21 
22 #define bpf_event_rcu_dereference(p)					\
23 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
24 
25 #ifdef CONFIG_MODULES
26 struct bpf_trace_module {
27 	struct module *module;
28 	struct list_head list;
29 };
30 
31 static LIST_HEAD(bpf_trace_modules);
32 static DEFINE_MUTEX(bpf_module_mutex);
33 
34 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
35 {
36 	struct bpf_raw_event_map *btp, *ret = NULL;
37 	struct bpf_trace_module *btm;
38 	unsigned int i;
39 
40 	mutex_lock(&bpf_module_mutex);
41 	list_for_each_entry(btm, &bpf_trace_modules, list) {
42 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
43 			btp = &btm->module->bpf_raw_events[i];
44 			if (!strcmp(btp->tp->name, name)) {
45 				if (try_module_get(btm->module))
46 					ret = btp;
47 				goto out;
48 			}
49 		}
50 	}
51 out:
52 	mutex_unlock(&bpf_module_mutex);
53 	return ret;
54 }
55 #else
56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57 {
58 	return NULL;
59 }
60 #endif /* CONFIG_MODULES */
61 
62 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
63 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
64 
65 /**
66  * trace_call_bpf - invoke BPF program
67  * @call: tracepoint event
68  * @ctx: opaque context pointer
69  *
70  * kprobe handlers execute BPF programs via this helper.
71  * Can be used from static tracepoints in the future.
72  *
73  * Return: BPF programs always return an integer which is interpreted by
74  * kprobe handler as:
75  * 0 - return from kprobe (event is filtered out)
76  * 1 - store kprobe event into ring buffer
77  * Other values are reserved and currently alias to 1
78  */
79 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
80 {
81 	unsigned int ret;
82 
83 	if (in_nmi()) /* not supported yet */
84 		return 1;
85 
86 	cant_sleep();
87 
88 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
89 		/*
90 		 * since some bpf program is already running on this cpu,
91 		 * don't call into another bpf program (same or different)
92 		 * and don't send kprobe event into ring-buffer,
93 		 * so return zero here
94 		 */
95 		ret = 0;
96 		goto out;
97 	}
98 
99 	/*
100 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
101 	 * to all call sites, we do a bpf_prog_array_valid() check there to
102 	 * see whether call->prog_array is empty or not, which is
103 	 * a heuristic to speed up execution.
104 	 *
105 	 * If the prog_array fetched by bpf_prog_array_valid() was
106 	 * non-NULL, we enter trace_call_bpf() and do the actual,
107 	 * proper rcu_dereference() under the RCU lock.
108 	 * If it turns out that prog_array is NULL, we bail out.
109 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
110 	 * was NULL, the prog_array is skipped with the risk of missing
111 	 * events that were added between that check and the
112 	 * rcu_dereference(), which is an accepted risk.
113 	 */
114 	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
115 
116  out:
117 	__this_cpu_dec(bpf_prog_active);
118 
119 	return ret;
120 }
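/*
 * Illustrative only (not part of this file): a typical caller such as a
 * kprobe handler is expected to consume the return value roughly as
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *		return;
 *
 * i.e. a zero return filters the event out, and only a non-zero return
 * lets the kprobe event be stored into the ring buffer.
 */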
121 
122 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
123 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
124 {
125 	regs_set_return_value(regs, rc);
126 	override_function_with_return(regs);
127 	return 0;
128 }
129 
130 static const struct bpf_func_proto bpf_override_return_proto = {
131 	.func		= bpf_override_return,
132 	.gpl_only	= true,
133 	.ret_type	= RET_INTEGER,
134 	.arg1_type	= ARG_PTR_TO_CTX,
135 	.arg2_type	= ARG_ANYTHING,
136 };
137 #endif
138 
139 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
140 	   const void __user *, unsafe_ptr)
141 {
142 	int ret = probe_user_read(dst, unsafe_ptr, size);
143 
144 	if (unlikely(ret < 0))
145 		memset(dst, 0, size);
146 
147 	return ret;
148 }
149 
150 const struct bpf_func_proto bpf_probe_read_user_proto = {
151 	.func		= bpf_probe_read_user,
152 	.gpl_only	= true,
153 	.ret_type	= RET_INTEGER,
154 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
155 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
156 	.arg3_type	= ARG_ANYTHING,
157 };
158 
159 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
160 	   const void __user *, unsafe_ptr)
161 {
162 	int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
163 
164 	if (unlikely(ret < 0))
165 		memset(dst, 0, size);
166 
167 	return ret;
168 }
169 
170 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
171 	.func		= bpf_probe_read_user_str,
172 	.gpl_only	= true,
173 	.ret_type	= RET_INTEGER,
174 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
175 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
176 	.arg3_type	= ARG_ANYTHING,
177 };
178 
179 static __always_inline int
180 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
181 			     const bool compat)
182 {
183 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
184 
185 	if (unlikely(ret < 0))
186 		goto out;
187 	ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
188 	      probe_kernel_read_strict(dst, unsafe_ptr, size);
189 	if (unlikely(ret < 0))
190 out:
191 		memset(dst, 0, size);
192 	return ret;
193 }
194 
195 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
196 	   const void *, unsafe_ptr)
197 {
198 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
199 }
200 
201 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
202 	.func		= bpf_probe_read_kernel,
203 	.gpl_only	= true,
204 	.ret_type	= RET_INTEGER,
205 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
206 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
207 	.arg3_type	= ARG_ANYTHING,
208 };
209 
210 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
211 	   const void *, unsafe_ptr)
212 {
213 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
214 }
215 
216 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
217 	.func		= bpf_probe_read_compat,
218 	.gpl_only	= true,
219 	.ret_type	= RET_INTEGER,
220 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
221 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
222 	.arg3_type	= ARG_ANYTHING,
223 };
224 
225 static __always_inline int
226 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
227 				 const bool compat)
228 {
229 	int ret = security_locked_down(LOCKDOWN_BPF_READ);
230 
231 	if (unlikely(ret < 0))
232 		goto out;
233 	/*
234 	 * The strncpy_from_unsafe_*() call will likely not fill the entire
235 	 * buffer, but that's okay in this circumstance as we're probing
236 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
237 	 * as well probe the stack. Thus, memory is explicitly cleared
238 	 * only in error case, so that improper users ignoring return
239 	 * code altogether don't copy garbage; otherwise length of string
240 	 * is returned that can be used for bpf_perf_event_output() et al.
241 	 */
242 	ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
243 	      strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
244 	if (unlikely(ret < 0))
245 out:
246 		memset(dst, 0, size);
247 	return ret;
248 }
249 
250 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
251 	   const void *, unsafe_ptr)
252 {
253 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
254 }
255 
256 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
257 	.func		= bpf_probe_read_kernel_str,
258 	.gpl_only	= true,
259 	.ret_type	= RET_INTEGER,
260 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
261 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
262 	.arg3_type	= ARG_ANYTHING,
263 };
264 
265 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
266 	   const void *, unsafe_ptr)
267 {
268 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
269 }
270 
271 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
272 	.func		= bpf_probe_read_compat_str,
273 	.gpl_only	= true,
274 	.ret_type	= RET_INTEGER,
275 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
276 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
277 	.arg3_type	= ARG_ANYTHING,
278 };
279 
280 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
281 	   u32, size)
282 {
283 	/*
284 	 * Ensure we're in user context which is safe for the helper to
285 	 * run. This helper has no business in a kthread.
286 	 *
287 	 * access_ok() should prevent writing to non-user memory, but in
288 	 * some situations (nommu, temporary switch, etc) access_ok() does
289 	 * not provide enough validation, hence the check on KERNEL_DS.
290 	 *
291 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
292 	 * state, when the task or mm are switched. This is specifically
293 	 * required to prevent the use of temporary mm.
294 	 */
295 
296 	if (unlikely(in_interrupt() ||
297 		     current->flags & (PF_KTHREAD | PF_EXITING)))
298 		return -EPERM;
299 	if (unlikely(uaccess_kernel()))
300 		return -EPERM;
301 	if (unlikely(!nmi_uaccess_okay()))
302 		return -EPERM;
303 
304 	return probe_user_write(unsafe_ptr, src, size);
305 }
306 
307 static const struct bpf_func_proto bpf_probe_write_user_proto = {
308 	.func		= bpf_probe_write_user,
309 	.gpl_only	= true,
310 	.ret_type	= RET_INTEGER,
311 	.arg1_type	= ARG_ANYTHING,
312 	.arg2_type	= ARG_PTR_TO_MEM,
313 	.arg3_type	= ARG_CONST_SIZE,
314 };
315 
316 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
317 {
318 	if (!capable(CAP_SYS_ADMIN))
319 		return NULL;
320 
321 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
322 			    current->comm, task_pid_nr(current));
323 
324 	return &bpf_probe_write_user_proto;
325 }
326 
327 /*
328  * Only limited trace_printk() conversion specifiers allowed:
329  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
330  */
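/*
 * Illustrative examples of format strings as seen from a BPF program:
 *
 *	"pid %d comm %s\n"	- accepted
 *	"%s %s\n"		- rejected, only one '%s' is allowed
 *	"%5d\n"			- rejected, width modifiers are not supported
 *
 * At most three conversion specifiers are supported, matching arg1..arg3.
 */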
331 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
332 	   u64, arg2, u64, arg3)
333 {
334 	int i, mod[3] = {}, fmt_cnt = 0;
335 	char buf[64], fmt_ptype;
336 	void *unsafe_ptr = NULL;
337 	bool str_seen = false;
338 
339 	/*
340 	 * bpf_check()->check_func_arg()->check_stack_boundary()
341 	 * guarantees that fmt points to bpf program stack,
342 	 * fmt_size bytes of it were initialized and fmt_size > 0
343 	 */
344 	if (fmt[--fmt_size] != 0)
345 		return -EINVAL;
346 
347 	/* check format string for allowed specifiers */
348 	for (i = 0; i < fmt_size; i++) {
349 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
350 			return -EINVAL;
351 
352 		if (fmt[i] != '%')
353 			continue;
354 
355 		if (fmt_cnt >= 3)
356 			return -EINVAL;
357 
358 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
359 		i++;
360 		if (fmt[i] == 'l') {
361 			mod[fmt_cnt]++;
362 			i++;
363 		} else if (fmt[i] == 'p') {
364 			mod[fmt_cnt]++;
365 			if ((fmt[i + 1] == 'k' ||
366 			     fmt[i + 1] == 'u') &&
367 			    fmt[i + 2] == 's') {
368 				fmt_ptype = fmt[i + 1];
369 				i += 2;
370 				goto fmt_str;
371 			}
372 
373 			/* disallow any further format extensions */
374 			if (fmt[i + 1] != 0 &&
375 			    !isspace(fmt[i + 1]) &&
376 			    !ispunct(fmt[i + 1]))
377 				return -EINVAL;
378 
379 			goto fmt_next;
380 		} else if (fmt[i] == 's') {
381 			mod[fmt_cnt]++;
382 			fmt_ptype = fmt[i];
383 fmt_str:
384 			if (str_seen)
385 				/* allow only one '%s' per fmt string */
386 				return -EINVAL;
387 			str_seen = true;
388 
389 			if (fmt[i + 1] != 0 &&
390 			    !isspace(fmt[i + 1]) &&
391 			    !ispunct(fmt[i + 1]))
392 				return -EINVAL;
393 
394 			switch (fmt_cnt) {
395 			case 0:
396 				unsafe_ptr = (void *)(long)arg1;
397 				arg1 = (long)buf;
398 				break;
399 			case 1:
400 				unsafe_ptr = (void *)(long)arg2;
401 				arg2 = (long)buf;
402 				break;
403 			case 2:
404 				unsafe_ptr = (void *)(long)arg3;
405 				arg3 = (long)buf;
406 				break;
407 			}
408 
409 			buf[0] = 0;
410 			switch (fmt_ptype) {
411 			case 's':
412 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
413 				strncpy_from_unsafe(buf, unsafe_ptr,
414 						    sizeof(buf));
415 				break;
416 #endif
417 			case 'k':
418 				strncpy_from_unsafe_strict(buf, unsafe_ptr,
419 							   sizeof(buf));
420 				break;
421 			case 'u':
422 				strncpy_from_unsafe_user(buf,
423 					(__force void __user *)unsafe_ptr,
424 							 sizeof(buf));
425 				break;
426 			}
427 			goto fmt_next;
428 		}
429 
430 		if (fmt[i] == 'l') {
431 			mod[fmt_cnt]++;
432 			i++;
433 		}
434 
435 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
436 		    fmt[i] != 'u' && fmt[i] != 'x')
437 			return -EINVAL;
438 fmt_next:
439 		fmt_cnt++;
440 	}
441 
442 /* Horrid workaround for getting va_list handling working with different
443  * argument type combinations generically for 32 and 64 bit archs.
444  */
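/*
 * mod[i] is 0 for plain int conversions, 1 for a single 'l' (and for the
 * '%p'/'%s' cases above), and 2 for 'll'. The nested conditionals below
 * cast each argument so that its promoted width matches what
 * __trace_printk()'s va_list handling expects on 32-bit and 64-bit archs.
 */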
445 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
446 #define __BPF_TP(...)							\
447 	__trace_printk(0 /* Fake ip */,					\
448 		       fmt, ##__VA_ARGS__)
449 
450 #define __BPF_ARG1_TP(...)						\
451 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
452 	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
453 	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
454 	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
455 	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
456 
457 #define __BPF_ARG2_TP(...)						\
458 	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
459 	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
460 	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
461 	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
462 	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
463 
464 #define __BPF_ARG3_TP(...)						\
465 	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
466 	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
467 	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
468 	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
469 	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
470 
471 	return __BPF_TP_EMIT();
472 }
473 
474 static const struct bpf_func_proto bpf_trace_printk_proto = {
475 	.func		= bpf_trace_printk,
476 	.gpl_only	= true,
477 	.ret_type	= RET_INTEGER,
478 	.arg1_type	= ARG_PTR_TO_MEM,
479 	.arg2_type	= ARG_CONST_SIZE,
480 };
481 
482 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
483 {
484 	/*
485 	 * this program might be calling bpf_trace_printk,
486 	 * so allocate per-cpu printk buffers
487 	 */
488 	trace_printk_init_buffers();
489 
490 	return &bpf_trace_printk_proto;
491 }
492 
493 #define MAX_SEQ_PRINTF_VARARGS		12
494 #define MAX_SEQ_PRINTF_MAX_MEMCPY	6
495 #define MAX_SEQ_PRINTF_STR_LEN		128
496 
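/*
 * Per-CPU scratch buffers for string and IP-address arguments of
 * bpf_seq_printf(). bpf_seq_printf_buf_used acts as a recursion guard:
 * only one bpf_seq_printf() invocation may own the buffers on a given
 * CPU at a time, otherwise -EBUSY is returned.
 */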
497 struct bpf_seq_printf_buf {
498 	char buf[MAX_SEQ_PRINTF_MAX_MEMCPY][MAX_SEQ_PRINTF_STR_LEN];
499 };
500 static DEFINE_PER_CPU(struct bpf_seq_printf_buf, bpf_seq_printf_buf);
501 static DEFINE_PER_CPU(int, bpf_seq_printf_buf_used);
502 
503 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
504 	   const void *, data, u32, data_len)
505 {
506 	int err = -EINVAL, fmt_cnt = 0, memcpy_cnt = 0;
507 	int i, buf_used, copy_size, num_args;
508 	u64 params[MAX_SEQ_PRINTF_VARARGS];
509 	struct bpf_seq_printf_buf *bufs;
510 	const u64 *args = data;
511 
512 	buf_used = this_cpu_inc_return(bpf_seq_printf_buf_used);
513 	if (WARN_ON_ONCE(buf_used > 1)) {
514 		err = -EBUSY;
515 		goto out;
516 	}
517 
518 	bufs = this_cpu_ptr(&bpf_seq_printf_buf);
519 
520 	/*
521 	 * bpf_check()->check_func_arg()->check_stack_boundary()
522 	 * guarantees that fmt points to bpf program stack,
523 	 * fmt_size bytes of it were initialized and fmt_size > 0
524 	 */
525 	if (fmt[--fmt_size] != 0)
526 		goto out;
527 
528 	if (data_len & 7)
529 		goto out;
530 
531 	for (i = 0; i < fmt_size; i++) {
532 		if (fmt[i] == '%') {
533 			if (fmt[i + 1] == '%')
534 				i++;
535 			else if (!data || !data_len)
536 				goto out;
537 		}
538 	}
539 
540 	num_args = data_len / 8;
541 
542 	/* check format string for allowed specifiers */
543 	for (i = 0; i < fmt_size; i++) {
544 		/* only printable ascii for now. */
545 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
546 			err = -EINVAL;
547 			goto out;
548 		}
549 
550 		if (fmt[i] != '%')
551 			continue;
552 
553 		if (fmt[i + 1] == '%') {
554 			i++;
555 			continue;
556 		}
557 
558 		if (fmt_cnt >= MAX_SEQ_PRINTF_VARARGS) {
559 			err = -E2BIG;
560 			goto out;
561 		}
562 
563 		if (fmt_cnt >= num_args) {
564 			err = -EINVAL;
565 			goto out;
566 		}
567 
568 		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
569 		i++;
570 
571 		/* skip optional "[0 +-][num]" width formatting field */
572 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
573 		       fmt[i] == ' ')
574 			i++;
575 		if (fmt[i] >= '1' && fmt[i] <= '9') {
576 			i++;
577 			while (fmt[i] >= '0' && fmt[i] <= '9')
578 				i++;
579 		}
580 
581 		if (fmt[i] == 's') {
582 			/* try our best to copy */
583 			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
584 				err = -E2BIG;
585 				goto out;
586 			}
587 
588 			err = strncpy_from_unsafe_strict(bufs->buf[memcpy_cnt],
589 							 (void *) (long) args[fmt_cnt],
590 							 MAX_SEQ_PRINTF_STR_LEN);
591 			if (err < 0)
592 				bufs->buf[memcpy_cnt][0] = '\0';
593 			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
594 
595 			fmt_cnt++;
596 			memcpy_cnt++;
597 			continue;
598 		}
599 
600 		if (fmt[i] == 'p') {
601 			if (fmt[i + 1] == 0 ||
602 			    fmt[i + 1] == 'K' ||
603 			    fmt[i + 1] == 'x') {
604 				/* just kernel pointers */
605 				params[fmt_cnt] = args[fmt_cnt];
606 				fmt_cnt++;
607 				continue;
608 			}
609 
610 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
611 			if (fmt[i + 1] != 'i' && fmt[i + 1] != 'I') {
612 				err = -EINVAL;
613 				goto out;
614 			}
615 			if (fmt[i + 2] != '4' && fmt[i + 2] != '6') {
616 				err = -EINVAL;
617 				goto out;
618 			}
619 
620 			if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
621 				err = -E2BIG;
622 				goto out;
623 			}
624 
625 
626 			copy_size = (fmt[i + 2] == '4') ? 4 : 16;
627 
628 			err = probe_kernel_read(bufs->buf[memcpy_cnt],
629 						(void *) (long) args[fmt_cnt],
630 						copy_size);
631 			if (err < 0)
632 				memset(bufs->buf[memcpy_cnt], 0, copy_size);
633 			params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
634 
635 			i += 2;
636 			fmt_cnt++;
637 			memcpy_cnt++;
638 			continue;
639 		}
640 
641 		if (fmt[i] == 'l') {
642 			i++;
643 			if (fmt[i] == 'l')
644 				i++;
645 		}
646 
647 		if (fmt[i] != 'i' && fmt[i] != 'd' &&
648 		    fmt[i] != 'u' && fmt[i] != 'x') {
649 			err = -EINVAL;
650 			goto out;
651 		}
652 
653 		params[fmt_cnt] = args[fmt_cnt];
654 		fmt_cnt++;
655 	}
656 
657 	/* At most we can have MAX_SEQ_PRINTF_VARARGS parameters; just give
658 	 * all of them to seq_printf().
659 	 */
660 	seq_printf(m, fmt, params[0], params[1], params[2], params[3],
661 		   params[4], params[5], params[6], params[7], params[8],
662 		   params[9], params[10], params[11]);
663 
664 	err = seq_has_overflowed(m) ? -EOVERFLOW : 0;
665 out:
666 	this_cpu_dec(bpf_seq_printf_buf_used);
667 	return err;
668 }
669 
670 static int bpf_seq_printf_btf_ids[5];
671 static const struct bpf_func_proto bpf_seq_printf_proto = {
672 	.func		= bpf_seq_printf,
673 	.gpl_only	= true,
674 	.ret_type	= RET_INTEGER,
675 	.arg1_type	= ARG_PTR_TO_BTF_ID,
676 	.arg2_type	= ARG_PTR_TO_MEM,
677 	.arg3_type	= ARG_CONST_SIZE,
678 	.arg4_type      = ARG_PTR_TO_MEM_OR_NULL,
679 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
680 	.btf_id		= bpf_seq_printf_btf_ids,
681 };
682 
683 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
684 {
685 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
686 }
687 
688 static int bpf_seq_write_btf_ids[5];
689 static const struct bpf_func_proto bpf_seq_write_proto = {
690 	.func		= bpf_seq_write,
691 	.gpl_only	= true,
692 	.ret_type	= RET_INTEGER,
693 	.arg1_type	= ARG_PTR_TO_BTF_ID,
694 	.arg2_type	= ARG_PTR_TO_MEM,
695 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
696 	.btf_id		= bpf_seq_write_btf_ids,
697 };
698 
699 static __always_inline int
700 get_map_perf_counter(struct bpf_map *map, u64 flags,
701 		     u64 *value, u64 *enabled, u64 *running)
702 {
703 	struct bpf_array *array = container_of(map, struct bpf_array, map);
704 	unsigned int cpu = smp_processor_id();
705 	u64 index = flags & BPF_F_INDEX_MASK;
706 	struct bpf_event_entry *ee;
707 
708 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
709 		return -EINVAL;
710 	if (index == BPF_F_CURRENT_CPU)
711 		index = cpu;
712 	if (unlikely(index >= array->map.max_entries))
713 		return -E2BIG;
714 
715 	ee = READ_ONCE(array->ptrs[index]);
716 	if (!ee)
717 		return -ENOENT;
718 
719 	return perf_event_read_local(ee->event, value, enabled, running);
720 }
721 
722 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
723 {
724 	u64 value = 0;
725 	int err;
726 
727 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
728 	/*
729 	 * this api is ugly since we miss [-22..-2] range of valid
730 	 * counter values, but that's uapi
731 	 */
732 	if (err)
733 		return err;
734 	return value;
735 }
736 
737 static const struct bpf_func_proto bpf_perf_event_read_proto = {
738 	.func		= bpf_perf_event_read,
739 	.gpl_only	= true,
740 	.ret_type	= RET_INTEGER,
741 	.arg1_type	= ARG_CONST_MAP_PTR,
742 	.arg2_type	= ARG_ANYTHING,
743 };
744 
745 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
746 	   struct bpf_perf_event_value *, buf, u32, size)
747 {
748 	int err = -EINVAL;
749 
750 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
751 		goto clear;
752 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
753 				   &buf->running);
754 	if (unlikely(err))
755 		goto clear;
756 	return 0;
757 clear:
758 	memset(buf, 0, size);
759 	return err;
760 }
761 
762 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
763 	.func		= bpf_perf_event_read_value,
764 	.gpl_only	= true,
765 	.ret_type	= RET_INTEGER,
766 	.arg1_type	= ARG_CONST_MAP_PTR,
767 	.arg2_type	= ARG_ANYTHING,
768 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
769 	.arg4_type	= ARG_CONST_SIZE,
770 };
771 
772 static __always_inline u64
773 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
774 			u64 flags, struct perf_sample_data *sd)
775 {
776 	struct bpf_array *array = container_of(map, struct bpf_array, map);
777 	unsigned int cpu = smp_processor_id();
778 	u64 index = flags & BPF_F_INDEX_MASK;
779 	struct bpf_event_entry *ee;
780 	struct perf_event *event;
781 
782 	if (index == BPF_F_CURRENT_CPU)
783 		index = cpu;
784 	if (unlikely(index >= array->map.max_entries))
785 		return -E2BIG;
786 
787 	ee = READ_ONCE(array->ptrs[index]);
788 	if (!ee)
789 		return -ENOENT;
790 
791 	event = ee->event;
792 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
793 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
794 		return -EINVAL;
795 
796 	if (unlikely(event->oncpu != cpu))
797 		return -EOPNOTSUPP;
798 
799 	return perf_event_output(event, sd, regs);
800 }
801 
802 /*
803  * Support executing tracepoints in normal, irq, and nmi context that each call
804  * bpf_perf_event_output
805  */
806 struct bpf_trace_sample_data {
807 	struct perf_sample_data sds[3];
808 };
809 
810 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
811 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
812 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
813 	   u64, flags, void *, data, u64, size)
814 {
815 	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
816 	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
817 	struct perf_raw_record raw = {
818 		.frag = {
819 			.size = size,
820 			.data = data,
821 		},
822 	};
823 	struct perf_sample_data *sd;
824 	int err;
825 
826 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
827 		err = -EBUSY;
828 		goto out;
829 	}
830 
831 	sd = &sds->sds[nest_level - 1];
832 
833 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
834 		err = -EINVAL;
835 		goto out;
836 	}
837 
838 	perf_sample_data_init(sd, 0, 0);
839 	sd->raw = &raw;
840 
841 	err = __bpf_perf_event_output(regs, map, flags, sd);
842 
843 out:
844 	this_cpu_dec(bpf_trace_nest_level);
845 	return err;
846 }
847 
848 static const struct bpf_func_proto bpf_perf_event_output_proto = {
849 	.func		= bpf_perf_event_output,
850 	.gpl_only	= true,
851 	.ret_type	= RET_INTEGER,
852 	.arg1_type	= ARG_PTR_TO_CTX,
853 	.arg2_type	= ARG_CONST_MAP_PTR,
854 	.arg3_type	= ARG_ANYTHING,
855 	.arg4_type	= ARG_PTR_TO_MEM,
856 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
857 };
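/*
 * Illustrative only: from a BPF program this helper is typically used as
 *
 *	struct event ev = { ... };
 *	bpf_perf_event_output(ctx, &events_map, BPF_F_CURRENT_CPU,
 *			      &ev, sizeof(ev));
 *
 * where 'events_map' (a hypothetical name) is a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * and BPF_F_CURRENT_CPU selects the perf event of the executing CPU.
 */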
858 
859 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
860 struct bpf_nested_pt_regs {
861 	struct pt_regs regs[3];
862 };
863 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
864 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
865 
866 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
867 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
868 {
869 	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
870 	struct perf_raw_frag frag = {
871 		.copy		= ctx_copy,
872 		.size		= ctx_size,
873 		.data		= ctx,
874 	};
875 	struct perf_raw_record raw = {
876 		.frag = {
877 			{
878 				.next	= ctx_size ? &frag : NULL,
879 			},
880 			.size	= meta_size,
881 			.data	= meta,
882 		},
883 	};
884 	struct perf_sample_data *sd;
885 	struct pt_regs *regs;
886 	u64 ret;
887 
888 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
889 		ret = -EBUSY;
890 		goto out;
891 	}
892 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
893 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
894 
895 	perf_fetch_caller_regs(regs);
896 	perf_sample_data_init(sd, 0, 0);
897 	sd->raw = &raw;
898 
899 	ret = __bpf_perf_event_output(regs, map, flags, sd);
900 out:
901 	this_cpu_dec(bpf_event_output_nest_level);
902 	return ret;
903 }
904 
905 BPF_CALL_0(bpf_get_current_task)
906 {
907 	return (long) current;
908 }
909 
910 const struct bpf_func_proto bpf_get_current_task_proto = {
911 	.func		= bpf_get_current_task,
912 	.gpl_only	= true,
913 	.ret_type	= RET_INTEGER,
914 };
915 
916 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
917 {
918 	struct bpf_array *array = container_of(map, struct bpf_array, map);
919 	struct cgroup *cgrp;
920 
921 	if (unlikely(idx >= array->map.max_entries))
922 		return -E2BIG;
923 
924 	cgrp = READ_ONCE(array->ptrs[idx]);
925 	if (unlikely(!cgrp))
926 		return -EAGAIN;
927 
928 	return task_under_cgroup_hierarchy(current, cgrp);
929 }
930 
931 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
932 	.func           = bpf_current_task_under_cgroup,
933 	.gpl_only       = false,
934 	.ret_type       = RET_INTEGER,
935 	.arg1_type      = ARG_CONST_MAP_PTR,
936 	.arg2_type      = ARG_ANYTHING,
937 };
938 
939 struct send_signal_irq_work {
940 	struct irq_work irq_work;
941 	struct task_struct *task;
942 	u32 sig;
943 	enum pid_type type;
944 };
945 
946 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
947 
948 static void do_bpf_send_signal(struct irq_work *entry)
949 {
950 	struct send_signal_irq_work *work;
951 
952 	work = container_of(entry, struct send_signal_irq_work, irq_work);
953 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
954 }
955 
956 static int bpf_send_signal_common(u32 sig, enum pid_type type)
957 {
958 	struct send_signal_irq_work *work = NULL;
959 
960 	/* Similar to bpf_probe_write_user, the task needs to be
961 	 * in a sound condition and kernel memory access must be
962 	 * permitted in order to send a signal to the current
963 	 * task.
964 	 */
965 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
966 		return -EPERM;
967 	if (unlikely(uaccess_kernel()))
968 		return -EPERM;
969 	if (unlikely(!nmi_uaccess_okay()))
970 		return -EPERM;
971 
972 	if (irqs_disabled()) {
973 		/* Do an early check on signal validity. Otherwise,
974 		 * the error is lost in deferred irq_work.
975 		 */
976 		if (unlikely(!valid_signal(sig)))
977 			return -EINVAL;
978 
979 		work = this_cpu_ptr(&send_signal_work);
980 		if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
981 			return -EBUSY;
982 
983 		/* Add the current task, which is the target of the signal,
984 		 * to the irq_work. By the time the queued irq_work runs,
985 		 * 'current' may already point to a different task.
986 		 */
987 		work->task = current;
988 		work->sig = sig;
989 		work->type = type;
990 		irq_work_queue(&work->irq_work);
991 		return 0;
992 	}
993 
994 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
995 }
996 
997 BPF_CALL_1(bpf_send_signal, u32, sig)
998 {
999 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
1000 }
1001 
1002 static const struct bpf_func_proto bpf_send_signal_proto = {
1003 	.func		= bpf_send_signal,
1004 	.gpl_only	= false,
1005 	.ret_type	= RET_INTEGER,
1006 	.arg1_type	= ARG_ANYTHING,
1007 };
1008 
1009 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
1010 {
1011 	return bpf_send_signal_common(sig, PIDTYPE_PID);
1012 }
1013 
1014 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
1015 	.func		= bpf_send_signal_thread,
1016 	.gpl_only	= false,
1017 	.ret_type	= RET_INTEGER,
1018 	.arg1_type	= ARG_ANYTHING,
1019 };
1020 
1021 const struct bpf_func_proto *
1022 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1023 {
1024 	switch (func_id) {
1025 	case BPF_FUNC_map_lookup_elem:
1026 		return &bpf_map_lookup_elem_proto;
1027 	case BPF_FUNC_map_update_elem:
1028 		return &bpf_map_update_elem_proto;
1029 	case BPF_FUNC_map_delete_elem:
1030 		return &bpf_map_delete_elem_proto;
1031 	case BPF_FUNC_map_push_elem:
1032 		return &bpf_map_push_elem_proto;
1033 	case BPF_FUNC_map_pop_elem:
1034 		return &bpf_map_pop_elem_proto;
1035 	case BPF_FUNC_map_peek_elem:
1036 		return &bpf_map_peek_elem_proto;
1037 	case BPF_FUNC_ktime_get_ns:
1038 		return &bpf_ktime_get_ns_proto;
1039 	case BPF_FUNC_ktime_get_boot_ns:
1040 		return &bpf_ktime_get_boot_ns_proto;
1041 	case BPF_FUNC_tail_call:
1042 		return &bpf_tail_call_proto;
1043 	case BPF_FUNC_get_current_pid_tgid:
1044 		return &bpf_get_current_pid_tgid_proto;
1045 	case BPF_FUNC_get_current_task:
1046 		return &bpf_get_current_task_proto;
1047 	case BPF_FUNC_get_current_uid_gid:
1048 		return &bpf_get_current_uid_gid_proto;
1049 	case BPF_FUNC_get_current_comm:
1050 		return &bpf_get_current_comm_proto;
1051 	case BPF_FUNC_trace_printk:
1052 		return bpf_get_trace_printk_proto();
1053 	case BPF_FUNC_get_smp_processor_id:
1054 		return &bpf_get_smp_processor_id_proto;
1055 	case BPF_FUNC_get_numa_node_id:
1056 		return &bpf_get_numa_node_id_proto;
1057 	case BPF_FUNC_perf_event_read:
1058 		return &bpf_perf_event_read_proto;
1059 	case BPF_FUNC_probe_write_user:
1060 		return bpf_get_probe_write_proto();
1061 	case BPF_FUNC_current_task_under_cgroup:
1062 		return &bpf_current_task_under_cgroup_proto;
1063 	case BPF_FUNC_get_prandom_u32:
1064 		return &bpf_get_prandom_u32_proto;
1065 	case BPF_FUNC_probe_read_user:
1066 		return &bpf_probe_read_user_proto;
1067 	case BPF_FUNC_probe_read_kernel:
1068 		return &bpf_probe_read_kernel_proto;
1069 	case BPF_FUNC_probe_read_user_str:
1070 		return &bpf_probe_read_user_str_proto;
1071 	case BPF_FUNC_probe_read_kernel_str:
1072 		return &bpf_probe_read_kernel_str_proto;
1073 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1074 	case BPF_FUNC_probe_read:
1075 		return &bpf_probe_read_compat_proto;
1076 	case BPF_FUNC_probe_read_str:
1077 		return &bpf_probe_read_compat_str_proto;
1078 #endif
1079 #ifdef CONFIG_CGROUPS
1080 	case BPF_FUNC_get_current_cgroup_id:
1081 		return &bpf_get_current_cgroup_id_proto;
1082 #endif
1083 	case BPF_FUNC_send_signal:
1084 		return &bpf_send_signal_proto;
1085 	case BPF_FUNC_send_signal_thread:
1086 		return &bpf_send_signal_thread_proto;
1087 	case BPF_FUNC_perf_event_read_value:
1088 		return &bpf_perf_event_read_value_proto;
1089 	case BPF_FUNC_get_ns_current_pid_tgid:
1090 		return &bpf_get_ns_current_pid_tgid_proto;
1091 	case BPF_FUNC_ringbuf_output:
1092 		return &bpf_ringbuf_output_proto;
1093 	case BPF_FUNC_ringbuf_reserve:
1094 		return &bpf_ringbuf_reserve_proto;
1095 	case BPF_FUNC_ringbuf_submit:
1096 		return &bpf_ringbuf_submit_proto;
1097 	case BPF_FUNC_ringbuf_discard:
1098 		return &bpf_ringbuf_discard_proto;
1099 	case BPF_FUNC_ringbuf_query:
1100 		return &bpf_ringbuf_query_proto;
1101 	default:
1102 		return NULL;
1103 	}
1104 }
1105 
1106 static const struct bpf_func_proto *
1107 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1108 {
1109 	switch (func_id) {
1110 	case BPF_FUNC_perf_event_output:
1111 		return &bpf_perf_event_output_proto;
1112 	case BPF_FUNC_get_stackid:
1113 		return &bpf_get_stackid_proto;
1114 	case BPF_FUNC_get_stack:
1115 		return &bpf_get_stack_proto;
1116 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1117 	case BPF_FUNC_override_return:
1118 		return &bpf_override_return_proto;
1119 #endif
1120 	default:
1121 		return bpf_tracing_func_proto(func_id, prog);
1122 	}
1123 }
1124 
1125 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1126 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1127 					const struct bpf_prog *prog,
1128 					struct bpf_insn_access_aux *info)
1129 {
1130 	if (off < 0 || off >= sizeof(struct pt_regs))
1131 		return false;
1132 	if (type != BPF_READ)
1133 		return false;
1134 	if (off % size != 0)
1135 		return false;
1136 	/*
1137 	 * Assertion for 32 bit to make sure last 8 byte access
1138 	 * (BPF_DW) to the last 4 byte member is disallowed.
1139 	 */
1140 	if (off + size > sizeof(struct pt_regs))
1141 		return false;
1142 
1143 	return true;
1144 }
1145 
1146 const struct bpf_verifier_ops kprobe_verifier_ops = {
1147 	.get_func_proto  = kprobe_prog_func_proto,
1148 	.is_valid_access = kprobe_prog_is_valid_access,
1149 };
1150 
1151 const struct bpf_prog_ops kprobe_prog_ops = {
1152 };
1153 
1154 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1155 	   u64, flags, void *, data, u64, size)
1156 {
1157 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1158 
1159 	/*
1160 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1161 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1162 	 * from there and call the same bpf_perf_event_output() helper inline.
1163 	 */
1164 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1165 }
1166 
1167 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1168 	.func		= bpf_perf_event_output_tp,
1169 	.gpl_only	= true,
1170 	.ret_type	= RET_INTEGER,
1171 	.arg1_type	= ARG_PTR_TO_CTX,
1172 	.arg2_type	= ARG_CONST_MAP_PTR,
1173 	.arg3_type	= ARG_ANYTHING,
1174 	.arg4_type	= ARG_PTR_TO_MEM,
1175 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1176 };
1177 
1178 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1179 	   u64, flags)
1180 {
1181 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1182 
1183 	/*
1184 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1185 	 * the other helper's function body cannot be inlined due to being
1186 	 * external, thus we need to call raw helper function.
1187 	 */
1188 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1189 			       flags, 0, 0);
1190 }
1191 
1192 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1193 	.func		= bpf_get_stackid_tp,
1194 	.gpl_only	= true,
1195 	.ret_type	= RET_INTEGER,
1196 	.arg1_type	= ARG_PTR_TO_CTX,
1197 	.arg2_type	= ARG_CONST_MAP_PTR,
1198 	.arg3_type	= ARG_ANYTHING,
1199 };
1200 
1201 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1202 	   u64, flags)
1203 {
1204 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1205 
1206 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1207 			     (unsigned long) size, flags, 0);
1208 }
1209 
1210 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1211 	.func		= bpf_get_stack_tp,
1212 	.gpl_only	= true,
1213 	.ret_type	= RET_INTEGER,
1214 	.arg1_type	= ARG_PTR_TO_CTX,
1215 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1216 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1217 	.arg4_type	= ARG_ANYTHING,
1218 };
1219 
1220 static const struct bpf_func_proto *
1221 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1222 {
1223 	switch (func_id) {
1224 	case BPF_FUNC_perf_event_output:
1225 		return &bpf_perf_event_output_proto_tp;
1226 	case BPF_FUNC_get_stackid:
1227 		return &bpf_get_stackid_proto_tp;
1228 	case BPF_FUNC_get_stack:
1229 		return &bpf_get_stack_proto_tp;
1230 	default:
1231 		return bpf_tracing_func_proto(func_id, prog);
1232 	}
1233 }
1234 
1235 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1236 				    const struct bpf_prog *prog,
1237 				    struct bpf_insn_access_aux *info)
1238 {
1239 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1240 		return false;
1241 	if (type != BPF_READ)
1242 		return false;
1243 	if (off % size != 0)
1244 		return false;
1245 
1246 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1247 	return true;
1248 }
1249 
1250 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1251 	.get_func_proto  = tp_prog_func_proto,
1252 	.is_valid_access = tp_prog_is_valid_access,
1253 };
1254 
1255 const struct bpf_prog_ops tracepoint_prog_ops = {
1256 };
1257 
1258 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1259 	   struct bpf_perf_event_value *, buf, u32, size)
1260 {
1261 	int err = -EINVAL;
1262 
1263 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1264 		goto clear;
1265 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1266 				    &buf->running);
1267 	if (unlikely(err))
1268 		goto clear;
1269 	return 0;
1270 clear:
1271 	memset(buf, 0, size);
1272 	return err;
1273 }
1274 
1275 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1276 	.func           = bpf_perf_prog_read_value,
1277 	.gpl_only       = true,
1278 	.ret_type       = RET_INTEGER,
1279 	.arg1_type      = ARG_PTR_TO_CTX,
1280 	.arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1281 	.arg3_type      = ARG_CONST_SIZE,
1282 };
1283 
1284 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1285 	   void *, buf, u32, size, u64, flags)
1286 {
1287 #ifndef CONFIG_X86
1288 	return -ENOENT;
1289 #else
1290 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1291 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1292 	u32 to_copy;
1293 
1294 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1295 		return -EINVAL;
1296 
1297 	if (unlikely(!br_stack))
1298 		return -EINVAL;
1299 
1300 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1301 		return br_stack->nr * br_entry_size;
1302 
1303 	if (!buf || (size % br_entry_size != 0))
1304 		return -EINVAL;
1305 
1306 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1307 	memcpy(buf, br_stack->entries, to_copy);
1308 
1309 	return to_copy;
1310 #endif
1311 }
1312 
1313 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1314 	.func           = bpf_read_branch_records,
1315 	.gpl_only       = true,
1316 	.ret_type       = RET_INTEGER,
1317 	.arg1_type      = ARG_PTR_TO_CTX,
1318 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1319 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1320 	.arg4_type      = ARG_ANYTHING,
1321 };
1322 
1323 static const struct bpf_func_proto *
1324 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1325 {
1326 	switch (func_id) {
1327 	case BPF_FUNC_perf_event_output:
1328 		return &bpf_perf_event_output_proto_tp;
1329 	case BPF_FUNC_get_stackid:
1330 		return &bpf_get_stackid_proto_tp;
1331 	case BPF_FUNC_get_stack:
1332 		return &bpf_get_stack_proto_tp;
1333 	case BPF_FUNC_perf_prog_read_value:
1334 		return &bpf_perf_prog_read_value_proto;
1335 	case BPF_FUNC_read_branch_records:
1336 		return &bpf_read_branch_records_proto;
1337 	default:
1338 		return bpf_tracing_func_proto(func_id, prog);
1339 	}
1340 }
1341 
1342 /*
1343  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1344  * to avoid potential recursive reuse issue when/if tracepoints are added
1345  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1346  *
1347  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1348  * in normal, irq, and nmi context.
1349  */
1350 struct bpf_raw_tp_regs {
1351 	struct pt_regs regs[3];
1352 };
1353 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1354 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1355 static struct pt_regs *get_bpf_raw_tp_regs(void)
1356 {
1357 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1358 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1359 
1360 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1361 		this_cpu_dec(bpf_raw_tp_nest_level);
1362 		return ERR_PTR(-EBUSY);
1363 	}
1364 
1365 	return &tp_regs->regs[nest_level - 1];
1366 }
1367 
1368 static void put_bpf_raw_tp_regs(void)
1369 {
1370 	this_cpu_dec(bpf_raw_tp_nest_level);
1371 }
1372 
1373 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1374 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1375 {
1376 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1377 	int ret;
1378 
1379 	if (IS_ERR(regs))
1380 		return PTR_ERR(regs);
1381 
1382 	perf_fetch_caller_regs(regs);
1383 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1384 
1385 	put_bpf_raw_tp_regs();
1386 	return ret;
1387 }
1388 
1389 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1390 	.func		= bpf_perf_event_output_raw_tp,
1391 	.gpl_only	= true,
1392 	.ret_type	= RET_INTEGER,
1393 	.arg1_type	= ARG_PTR_TO_CTX,
1394 	.arg2_type	= ARG_CONST_MAP_PTR,
1395 	.arg3_type	= ARG_ANYTHING,
1396 	.arg4_type	= ARG_PTR_TO_MEM,
1397 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1398 };
1399 
1400 extern const struct bpf_func_proto bpf_skb_output_proto;
1401 extern const struct bpf_func_proto bpf_xdp_output_proto;
1402 
1403 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1404 	   struct bpf_map *, map, u64, flags)
1405 {
1406 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1407 	int ret;
1408 
1409 	if (IS_ERR(regs))
1410 		return PTR_ERR(regs);
1411 
1412 	perf_fetch_caller_regs(regs);
1413 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1414 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1415 			      flags, 0, 0);
1416 	put_bpf_raw_tp_regs();
1417 	return ret;
1418 }
1419 
1420 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1421 	.func		= bpf_get_stackid_raw_tp,
1422 	.gpl_only	= true,
1423 	.ret_type	= RET_INTEGER,
1424 	.arg1_type	= ARG_PTR_TO_CTX,
1425 	.arg2_type	= ARG_CONST_MAP_PTR,
1426 	.arg3_type	= ARG_ANYTHING,
1427 };
1428 
1429 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1430 	   void *, buf, u32, size, u64, flags)
1431 {
1432 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1433 	int ret;
1434 
1435 	if (IS_ERR(regs))
1436 		return PTR_ERR(regs);
1437 
1438 	perf_fetch_caller_regs(regs);
1439 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1440 			    (unsigned long) size, flags, 0);
1441 	put_bpf_raw_tp_regs();
1442 	return ret;
1443 }
1444 
1445 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1446 	.func		= bpf_get_stack_raw_tp,
1447 	.gpl_only	= true,
1448 	.ret_type	= RET_INTEGER,
1449 	.arg1_type	= ARG_PTR_TO_CTX,
1450 	.arg2_type	= ARG_PTR_TO_MEM,
1451 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1452 	.arg4_type	= ARG_ANYTHING,
1453 };
1454 
1455 static const struct bpf_func_proto *
1456 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1457 {
1458 	switch (func_id) {
1459 	case BPF_FUNC_perf_event_output:
1460 		return &bpf_perf_event_output_proto_raw_tp;
1461 	case BPF_FUNC_get_stackid:
1462 		return &bpf_get_stackid_proto_raw_tp;
1463 	case BPF_FUNC_get_stack:
1464 		return &bpf_get_stack_proto_raw_tp;
1465 	default:
1466 		return bpf_tracing_func_proto(func_id, prog);
1467 	}
1468 }
1469 
1470 const struct bpf_func_proto *
1471 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1472 {
1473 	switch (func_id) {
1474 #ifdef CONFIG_NET
1475 	case BPF_FUNC_skb_output:
1476 		return &bpf_skb_output_proto;
1477 	case BPF_FUNC_xdp_output:
1478 		return &bpf_xdp_output_proto;
1479 #endif
1480 	case BPF_FUNC_seq_printf:
1481 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1482 		       &bpf_seq_printf_proto :
1483 		       NULL;
1484 	case BPF_FUNC_seq_write:
1485 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1486 		       &bpf_seq_write_proto :
1487 		       NULL;
1488 	default:
1489 		return raw_tp_prog_func_proto(func_id, prog);
1490 	}
1491 }
1492 
1493 static bool raw_tp_prog_is_valid_access(int off, int size,
1494 					enum bpf_access_type type,
1495 					const struct bpf_prog *prog,
1496 					struct bpf_insn_access_aux *info)
1497 {
1498 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1499 		return false;
1500 	if (type != BPF_READ)
1501 		return false;
1502 	if (off % size != 0)
1503 		return false;
1504 	return true;
1505 }
1506 
1507 static bool tracing_prog_is_valid_access(int off, int size,
1508 					 enum bpf_access_type type,
1509 					 const struct bpf_prog *prog,
1510 					 struct bpf_insn_access_aux *info)
1511 {
1512 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1513 		return false;
1514 	if (type != BPF_READ)
1515 		return false;
1516 	if (off % size != 0)
1517 		return false;
1518 	return btf_ctx_access(off, size, type, prog, info);
1519 }
1520 
1521 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
1522 				     const union bpf_attr *kattr,
1523 				     union bpf_attr __user *uattr)
1524 {
1525 	return -ENOTSUPP;
1526 }
1527 
1528 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
1529 	.get_func_proto  = raw_tp_prog_func_proto,
1530 	.is_valid_access = raw_tp_prog_is_valid_access,
1531 };
1532 
1533 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
1534 };
1535 
1536 const struct bpf_verifier_ops tracing_verifier_ops = {
1537 	.get_func_proto  = tracing_prog_func_proto,
1538 	.is_valid_access = tracing_prog_is_valid_access,
1539 };
1540 
1541 const struct bpf_prog_ops tracing_prog_ops = {
1542 	.test_run = bpf_prog_test_run_tracing,
1543 };
1544 
1545 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
1546 						 enum bpf_access_type type,
1547 						 const struct bpf_prog *prog,
1548 						 struct bpf_insn_access_aux *info)
1549 {
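	/*
	 * The first 8 bytes of the context are the pointer to the writable
	 * tracepoint buffer; a load from offset 0 therefore yields a
	 * PTR_TO_TP_BUFFER register.
	 */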
1550 	if (off == 0) {
1551 		if (size != sizeof(u64) || type != BPF_READ)
1552 			return false;
1553 		info->reg_type = PTR_TO_TP_BUFFER;
1554 	}
1555 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
1556 }
1557 
1558 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
1559 	.get_func_proto  = raw_tp_prog_func_proto,
1560 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
1561 };
1562 
1563 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
1564 };
1565 
1566 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1567 				    const struct bpf_prog *prog,
1568 				    struct bpf_insn_access_aux *info)
1569 {
1570 	const int size_u64 = sizeof(u64);
1571 
1572 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
1573 		return false;
1574 	if (type != BPF_READ)
1575 		return false;
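	/*
	 * On 32-bit archs, fields of struct bpf_perf_event_data may only be
	 * 4-byte aligned, so additionally allow an 8-byte (BPF_DW) read at
	 * an offset that is 4 modulo 8; everything else stays disallowed.
	 */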
1576 	if (off % size != 0) {
1577 		if (sizeof(unsigned long) != 4)
1578 			return false;
1579 		if (size != 8)
1580 			return false;
1581 		if (off % size != 4)
1582 			return false;
1583 	}
1584 
1585 	switch (off) {
1586 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
1587 		bpf_ctx_record_field_size(info, size_u64);
1588 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1589 			return false;
1590 		break;
1591 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
1592 		bpf_ctx_record_field_size(info, size_u64);
1593 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
1594 			return false;
1595 		break;
1596 	default:
1597 		if (size != sizeof(long))
1598 			return false;
1599 	}
1600 
1601 	return true;
1602 }
1603 
1604 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
1605 				      const struct bpf_insn *si,
1606 				      struct bpf_insn *insn_buf,
1607 				      struct bpf_prog *prog, u32 *target_size)
1608 {
1609 	struct bpf_insn *insn = insn_buf;
1610 
1611 	switch (si->off) {
1612 	case offsetof(struct bpf_perf_event_data, sample_period):
1613 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1614 						       data), si->dst_reg, si->src_reg,
1615 				      offsetof(struct bpf_perf_event_data_kern, data));
1616 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1617 				      bpf_target_off(struct perf_sample_data, period, 8,
1618 						     target_size));
1619 		break;
1620 	case offsetof(struct bpf_perf_event_data, addr):
1621 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1622 						       data), si->dst_reg, si->src_reg,
1623 				      offsetof(struct bpf_perf_event_data_kern, data));
1624 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
1625 				      bpf_target_off(struct perf_sample_data, addr, 8,
1626 						     target_size));
1627 		break;
1628 	default:
1629 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
1630 						       regs), si->dst_reg, si->src_reg,
1631 				      offsetof(struct bpf_perf_event_data_kern, regs));
1632 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
1633 				      si->off);
1634 		break;
1635 	}
1636 
1637 	return insn - insn_buf;
1638 }
1639 
1640 const struct bpf_verifier_ops perf_event_verifier_ops = {
1641 	.get_func_proto		= pe_prog_func_proto,
1642 	.is_valid_access	= pe_prog_is_valid_access,
1643 	.convert_ctx_access	= pe_prog_convert_ctx_access,
1644 };
1645 
1646 const struct bpf_prog_ops perf_event_prog_ops = {
1647 };
1648 
1649 static DEFINE_MUTEX(bpf_event_mutex);
1650 
1651 #define BPF_TRACE_MAX_PROGS 64
1652 
1653 int perf_event_attach_bpf_prog(struct perf_event *event,
1654 			       struct bpf_prog *prog)
1655 {
1656 	struct bpf_prog_array *old_array;
1657 	struct bpf_prog_array *new_array;
1658 	int ret = -EEXIST;
1659 
1660 	/*
1661 	 * Kprobe override only works if the kprobe is at the function entry,
1662 	 * and only if the target function is on the error-injection opt-in list.
1663 	 */
1664 	if (prog->kprobe_override &&
1665 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
1666 	     !trace_kprobe_error_injectable(event->tp_event)))
1667 		return -EINVAL;
1668 
1669 	mutex_lock(&bpf_event_mutex);
1670 
1671 	if (event->prog)
1672 		goto unlock;
1673 
1674 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1675 	if (old_array &&
1676 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
1677 		ret = -E2BIG;
1678 		goto unlock;
1679 	}
1680 
1681 	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
1682 	if (ret < 0)
1683 		goto unlock;
1684 
1685 	/* set the new array to event->tp_event and set event->prog */
1686 	event->prog = prog;
1687 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
1688 	bpf_prog_array_free(old_array);
1689 
1690 unlock:
1691 	mutex_unlock(&bpf_event_mutex);
1692 	return ret;
1693 }
1694 
1695 void perf_event_detach_bpf_prog(struct perf_event *event)
1696 {
1697 	struct bpf_prog_array *old_array;
1698 	struct bpf_prog_array *new_array;
1699 	int ret;
1700 
1701 	mutex_lock(&bpf_event_mutex);
1702 
1703 	if (!event->prog)
1704 		goto unlock;
1705 
1706 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
1707 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
1708 	if (ret == -ENOENT)
1709 		goto unlock;
1710 	if (ret < 0) {
1711 		bpf_prog_array_delete_safe(old_array, event->prog);
1712 	} else {
1713 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
1714 		bpf_prog_array_free(old_array);
1715 	}
1716 
1717 	bpf_prog_put(event->prog);
1718 	event->prog = NULL;
1719 
1720 unlock:
1721 	mutex_unlock(&bpf_event_mutex);
1722 }
1723 
1724 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1725 {
1726 	struct perf_event_query_bpf __user *uquery = info;
1727 	struct perf_event_query_bpf query = {};
1728 	struct bpf_prog_array *progs;
1729 	u32 *ids, prog_cnt, ids_len;
1730 	int ret;
1731 
1732 	if (!perfmon_capable())
1733 		return -EPERM;
1734 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
1735 		return -EINVAL;
1736 	if (copy_from_user(&query, uquery, sizeof(query)))
1737 		return -EFAULT;
1738 
1739 	ids_len = query.ids_len;
1740 	if (ids_len > BPF_TRACE_MAX_PROGS)
1741 		return -E2BIG;
1742 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1743 	if (!ids)
1744 		return -ENOMEM;
1745 	/*
1746 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1747 	 * is required when user only wants to check for uquery->prog_cnt.
1748 	 * There is no need to check for it since the case is handled
1749 	 * gracefully in bpf_prog_array_copy_info.
1750 	 */
1751 
1752 	mutex_lock(&bpf_event_mutex);
1753 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
1754 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
1755 	mutex_unlock(&bpf_event_mutex);
1756 
1757 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1758 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1759 		ret = -EFAULT;
1760 
1761 	kfree(ids);
1762 	return ret;
1763 }
1764 
1765 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
1766 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
1767 
1768 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
1769 {
1770 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
1771 
1772 	for (; btp < __stop__bpf_raw_tp; btp++) {
1773 		if (!strcmp(btp->tp->name, name))
1774 			return btp;
1775 	}
1776 
1777 	return bpf_get_raw_tracepoint_module(name);
1778 }
1779 
1780 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
1781 {
1782 	struct module *mod = __module_address((unsigned long)btp);
1783 
1784 	if (mod)
1785 		module_put(mod);
1786 }
1787 
1788 static __always_inline
1789 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
1790 {
1791 	cant_sleep();
1792 	rcu_read_lock();
1793 	(void) BPF_PROG_RUN(prog, args);
1794 	rcu_read_unlock();
1795 }
1796 
1797 #define UNPACK(...)			__VA_ARGS__
1798 #define REPEAT_1(FN, DL, X, ...)	FN(X)
1799 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
1800 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
1801 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
1802 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
1803 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
1804 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
1805 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
1806 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
1807 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
1808 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
1809 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
1810 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
1811 
1812 #define SARG(X)		u64 arg##X
1813 #define COPY(X)		args[X] = arg##X
1814 
1815 #define __DL_COM	(,)
1816 #define __DL_SEM	(;)
1817 
1818 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
1819 
1820 #define BPF_TRACE_DEFN_x(x)						\
1821 	void bpf_trace_run##x(struct bpf_prog *prog,			\
1822 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
1823 	{								\
1824 		u64 args[x];						\
1825 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
1826 		__bpf_trace_run(prog, args);				\
1827 	}								\
1828 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
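/*
 * For illustration, BPF_TRACE_DEFN_x(2) expands (roughly) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2)
 */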
1829 BPF_TRACE_DEFN_x(1);
1830 BPF_TRACE_DEFN_x(2);
1831 BPF_TRACE_DEFN_x(3);
1832 BPF_TRACE_DEFN_x(4);
1833 BPF_TRACE_DEFN_x(5);
1834 BPF_TRACE_DEFN_x(6);
1835 BPF_TRACE_DEFN_x(7);
1836 BPF_TRACE_DEFN_x(8);
1837 BPF_TRACE_DEFN_x(9);
1838 BPF_TRACE_DEFN_x(10);
1839 BPF_TRACE_DEFN_x(11);
1840 BPF_TRACE_DEFN_x(12);
1841 
1842 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1843 {
1844 	struct tracepoint *tp = btp->tp;
1845 
1846 	/*
1847 	 * check that program doesn't access arguments beyond what's
1848 	 * available in this tracepoint
1849 	 */
1850 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
1851 		return -EINVAL;
1852 
1853 	if (prog->aux->max_tp_access > btp->writable_size)
1854 		return -EINVAL;
1855 
1856 	return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
1857 }
1858 
1859 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1860 {
1861 	return __bpf_probe_register(btp, prog);
1862 }
1863 
1864 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
1865 {
1866 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
1867 }
1868 
1869 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
1870 			    u32 *fd_type, const char **buf,
1871 			    u64 *probe_offset, u64 *probe_addr)
1872 {
1873 	bool is_tracepoint, is_syscall_tp;
1874 	struct bpf_prog *prog;
1875 	int flags, err = 0;
1876 
1877 	prog = event->prog;
1878 	if (!prog)
1879 		return -ENOENT;
1880 
1881 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
1882 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
1883 		return -EOPNOTSUPP;
1884 
1885 	*prog_id = prog->aux->id;
1886 	flags = event->tp_event->flags;
1887 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
1888 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
1889 
1890 	if (is_tracepoint || is_syscall_tp) {
1891 		*buf = is_tracepoint ? event->tp_event->tp->name
1892 				     : event->tp_event->name;
1893 		*fd_type = BPF_FD_TYPE_TRACEPOINT;
1894 		*probe_offset = 0x0;
1895 		*probe_addr = 0x0;
1896 	} else {
1897 		/* kprobe/uprobe */
1898 		err = -EOPNOTSUPP;
1899 #ifdef CONFIG_KPROBE_EVENTS
1900 		if (flags & TRACE_EVENT_FL_KPROBE)
1901 			err = bpf_get_kprobe_info(event, fd_type, buf,
1902 						  probe_offset, probe_addr,
1903 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1904 #endif
1905 #ifdef CONFIG_UPROBE_EVENTS
1906 		if (flags & TRACE_EVENT_FL_UPROBE)
1907 			err = bpf_get_uprobe_info(event, fd_type, buf,
1908 						  probe_offset,
1909 						  event->attr.type == PERF_TYPE_TRACEPOINT);
1910 #endif
1911 	}
1912 
1913 	return err;
1914 }
1915 
1916 static int __init send_signal_irq_work_init(void)
1917 {
1918 	int cpu;
1919 	struct send_signal_irq_work *work;
1920 
1921 	for_each_possible_cpu(cpu) {
1922 		work = per_cpu_ptr(&send_signal_work, cpu);
1923 		init_irq_work(&work->irq_work, do_bpf_send_signal);
1924 	}
1925 	return 0;
1926 }
1927 
1928 subsys_initcall(send_signal_irq_work_init);
1929 
1930 #ifdef CONFIG_MODULES
1931 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
1932 			    void *module)
1933 {
1934 	struct bpf_trace_module *btm, *tmp;
1935 	struct module *mod = module;
1936 
1937 	if (mod->num_bpf_raw_events == 0 ||
1938 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
1939 		return 0;
1940 
1941 	mutex_lock(&bpf_module_mutex);
1942 
1943 	switch (op) {
1944 	case MODULE_STATE_COMING:
1945 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
1946 		if (btm) {
1947 			btm->module = module;
1948 			list_add(&btm->list, &bpf_trace_modules);
1949 		}
1950 		break;
1951 	case MODULE_STATE_GOING:
1952 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
1953 			if (btm->module == module) {
1954 				list_del(&btm->list);
1955 				kfree(btm);
1956 				break;
1957 			}
1958 		}
1959 		break;
1960 	}
1961 
1962 	mutex_unlock(&bpf_module_mutex);
1963 
1964 	return 0;
1965 }
1966 
1967 static struct notifier_block bpf_module_nb = {
1968 	.notifier_call = bpf_event_notify,
1969 };
1970 
1971 static int __init bpf_event_init(void)
1972 {
1973 	register_module_notifier(&bpf_module_nb);
1974 	return 0;
1975 }
1976 
1977 fs_initcall(bpf_event_init);
1978 #endif /* CONFIG_MODULES */
1979