xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 5735054af3d3f3b4188a540edcb8f170070d0003)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 #include <linux/namei.h>
27 
28 #include <net/bpf_sk_storage.h>
29 
30 #include <uapi/linux/bpf.h>
31 #include <uapi/linux/btf.h>
32 
33 #include <asm/tlb.h>
34 
35 #include "trace_probe.h"
36 #include "trace.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include "bpf_trace.h"
40 
41 #define bpf_event_rcu_dereference(p)					\
42 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
43 
44 #define MAX_UPROBE_MULTI_CNT (1U << 20)
45 
46 #ifdef CONFIG_MODULES
47 struct bpf_trace_module {
48 	struct module *module;
49 	struct list_head list;
50 };
51 
52 static LIST_HEAD(bpf_trace_modules);
53 static DEFINE_MUTEX(bpf_module_mutex);
54 
55 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
56 {
57 	struct bpf_raw_event_map *btp, *ret = NULL;
58 	struct bpf_trace_module *btm;
59 	unsigned int i;
60 
61 	mutex_lock(&bpf_module_mutex);
62 	list_for_each_entry(btm, &bpf_trace_modules, list) {
63 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
64 			btp = &btm->module->bpf_raw_events[i];
65 			if (!strcmp(btp->tp->name, name)) {
66 				if (try_module_get(btm->module))
67 					ret = btp;
68 				goto out;
69 			}
70 		}
71 	}
72 out:
73 	mutex_unlock(&bpf_module_mutex);
74 	return ret;
75 }
76 #else
77 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
78 {
79 	return NULL;
80 }
81 #endif /* CONFIG_MODULES */
82 
83 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
84 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
85 
86 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
87 				  u64 flags, const struct btf **btf,
88 				  s32 *btf_id);
89 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
90 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
91 
92 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
93 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
94 
95 /**
96  * trace_call_bpf - invoke BPF program
97  * @call: tracepoint event
98  * @ctx: opaque context pointer
99  *
100  * kprobe handlers execute BPF programs via this helper.
101  * It can be used from static tracepoints in the future.
102  *
103  * Return: BPF programs always return an integer which is interpreted by
104  * the kprobe handler as:
105  * 0 - return from kprobe (event is filtered out)
106  * 1 - store kprobe event into ring buffer
107  * Other values are reserved and currently alias to 1
108  */
109 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
110 {
111 	unsigned int ret;
112 
113 	cant_sleep();
114 
115 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
116 		/*
117 		 * since a bpf program is already running on this cpu,
118 		 * don't call into another bpf program (same or different),
119 		 * don't send the kprobe event into the ring buffer,
120 		 * and return zero here
121 		 */
122 		ret = 0;
123 		goto out;
124 	}
125 
126 	/*
127 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
128 	 * to all call sites, we do a bpf_prog_array_valid() check there to
129 	 * see whether call->prog_array is empty or not, which is
130 	 * a heuristic to speed up execution.
131 	 *
132 	 * If the prog_array fetched by bpf_prog_array_valid() was
133 	 * non-NULL, we enter trace_call_bpf() and do the actual,
134 	 * proper rcu_dereference() under the RCU lock.
135 	 * If it turns out that prog_array is NULL, we bail out.
136 	 * Conversely, if the pointer fetched by bpf_prog_array_valid()
137 	 * was NULL, we skip the prog_array and risk missing out on
138 	 * events if it was updated between that check and the
139 	 * rcu_dereference(), which is an accepted risk.
140 	 */
141 	rcu_read_lock();
142 	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
143 				 ctx, bpf_prog_run);
144 	rcu_read_unlock();
145 
146  out:
147 	__this_cpu_dec(bpf_prog_active);
148 
149 	return ret;
150 }
151 
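/*
 * A minimal BPF-side sketch of the return-value convention described above,
 * assuming libbpf's bpf_helpers.h; the probed symbol, program name and
 * my_pid global are hypothetical.
 *
 *	const volatile pid_t my_pid;		// set by the loader
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
 *			return 0;	// filtered out, event not stored
 *		return 1;		// store the kprobe event in the ring buffer
 *	}
 */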
152 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
153 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
154 {
155 	regs_set_return_value(regs, rc);
156 	override_function_with_return(regs);
157 	return 0;
158 }
159 
160 static const struct bpf_func_proto bpf_override_return_proto = {
161 	.func		= bpf_override_return,
162 	.gpl_only	= true,
163 	.ret_type	= RET_INTEGER,
164 	.arg1_type	= ARG_PTR_TO_CTX,
165 	.arg2_type	= ARG_ANYTHING,
166 };
167 #endif
168 
169 static __always_inline int
170 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
171 {
172 	int ret;
173 
174 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
175 	if (unlikely(ret < 0))
176 		memset(dst, 0, size);
177 	return ret;
178 }
179 
180 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
181 	   const void __user *, unsafe_ptr)
182 {
183 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
184 }
185 
186 const struct bpf_func_proto bpf_probe_read_user_proto = {
187 	.func		= bpf_probe_read_user,
188 	.gpl_only	= true,
189 	.ret_type	= RET_INTEGER,
190 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
191 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
192 	.arg3_type	= ARG_ANYTHING,
193 };
194 
195 static __always_inline int
196 bpf_probe_read_user_str_common(void *dst, u32 size,
197 			       const void __user *unsafe_ptr)
198 {
199 	int ret;
200 
201 	/*
202 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
203 	 * terminator into `dst`.
204 	 *
205 	 * strncpy_from_user() does long-sized strides in the fast path. If the
206 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
207 	 * then there could be junk after the NUL in `dst`. If the user takes `dst`
208 	 * and keys a hash map with it, then semantically identical strings can
209 	 * occupy multiple entries in the map.
210 	 */
211 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
212 	if (unlikely(ret < 0))
213 		memset(dst, 0, size);
214 	return ret;
215 }
216 
217 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
218 	   const void __user *, unsafe_ptr)
219 {
220 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
221 }
222 
223 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
224 	.func		= bpf_probe_read_user_str,
225 	.gpl_only	= true,
226 	.ret_type	= RET_INTEGER,
227 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
228 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
229 	.arg3_type	= ARG_ANYTHING,
230 };
231 
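/*
 * A minimal BPF-side sketch of reading a user string into a buffer that is
 * later used as a hash map key; zero-initializing the buffer keeps the bytes
 * past the copied NUL deterministic, which matters for the map-key concern
 * noted above. The counts map, user_ptr and one are hypothetical.
 *
 *	char key[64] = {};
 *	long n;
 *
 *	n = bpf_probe_read_user_str(key, sizeof(key), user_ptr);
 *	if (n < 0)
 *		return 0;	// fault while reading; dst was cleared
 *	bpf_map_update_elem(&counts, key, &one, BPF_ANY);
 */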
232 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
233 	   const void *, unsafe_ptr)
234 {
235 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
236 }
237 
238 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
239 	.func		= bpf_probe_read_kernel,
240 	.gpl_only	= true,
241 	.ret_type	= RET_INTEGER,
242 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
243 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
244 	.arg3_type	= ARG_ANYTHING,
245 };
246 
247 static __always_inline int
248 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
249 {
250 	int ret;
251 
252 	/*
253 	 * The strncpy_from_kernel_nofault() call will likely not fill the
254 	 * entire buffer, but that's okay in this circumstance as we're probing
255 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
256 	 * as well probe the stack. Thus, memory is explicitly cleared
257 	 * only in the error case, so that improper users ignoring the return
258 	 * code altogether don't copy garbage; otherwise the length of the string
259 	 * is returned, which can be used for bpf_perf_event_output() et al.
260 	 */
261 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
262 	if (unlikely(ret < 0))
263 		memset(dst, 0, size);
264 	return ret;
265 }
266 
267 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
268 	   const void *, unsafe_ptr)
269 {
270 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
271 }
272 
273 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
274 	.func		= bpf_probe_read_kernel_str,
275 	.gpl_only	= true,
276 	.ret_type	= RET_INTEGER,
277 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
278 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
279 	.arg3_type	= ARG_ANYTHING,
280 };
281 
282 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
283 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
284 	   const void *, unsafe_ptr)
285 {
286 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
287 		return bpf_probe_read_user_common(dst, size,
288 				(__force void __user *)unsafe_ptr);
289 	}
290 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
291 }
292 
293 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
294 	.func		= bpf_probe_read_compat,
295 	.gpl_only	= true,
296 	.ret_type	= RET_INTEGER,
297 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
298 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
299 	.arg3_type	= ARG_ANYTHING,
300 };
301 
302 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
303 	   const void *, unsafe_ptr)
304 {
305 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
306 		return bpf_probe_read_user_str_common(dst, size,
307 				(__force void __user *)unsafe_ptr);
308 	}
309 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
310 }
311 
312 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
313 	.func		= bpf_probe_read_compat_str,
314 	.gpl_only	= true,
315 	.ret_type	= RET_INTEGER,
316 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
317 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
318 	.arg3_type	= ARG_ANYTHING,
319 };
320 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
321 
322 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
323 	   u32, size)
324 {
325 	/*
326 	 * Ensure we're in a user task context in which it is safe for the
327 	 * helper to run. This helper has no business in a kthread.
328 	 *
329 	 * access_ok() should prevent writing to non-user memory, but in
330 	 * some situations (nommu, temporary switch, etc.) access_ok() does
331 	 * not provide enough validation, hence the additional checks below.
332 	 *
333 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
334 	 * state, when the task or mm are switched. This is specifically
335 	 * required to prevent the use of temporary mm.
336 	 */
337 
338 	if (unlikely(in_interrupt() ||
339 		     current->flags & (PF_KTHREAD | PF_EXITING)))
340 		return -EPERM;
341 	if (unlikely(!nmi_uaccess_okay()))
342 		return -EPERM;
343 
344 	return copy_to_user_nofault(unsafe_ptr, src, size);
345 }
346 
347 static const struct bpf_func_proto bpf_probe_write_user_proto = {
348 	.func		= bpf_probe_write_user,
349 	.gpl_only	= true,
350 	.ret_type	= RET_INTEGER,
351 	.arg1_type	= ARG_ANYTHING,
352 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
353 	.arg3_type	= ARG_CONST_SIZE,
354 };
355 
356 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
357 {
358 	if (!capable(CAP_SYS_ADMIN))
359 		return NULL;
360 
361 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
362 			    current->comm, task_pid_nr(current));
363 
364 	return &bpf_probe_write_user_proto;
365 }
366 
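/*
 * A minimal BPF-side sketch: bpf_probe_write_user() is only handed out via
 * bpf_get_probe_write_proto() above (CAP_SYS_ADMIN plus a ratelimited
 * warning) and writes into the current task's user memory. user_ptr is
 * hypothetical.
 *
 *	u64 patched = 42;
 *
 *	if (bpf_probe_write_user(user_ptr, &patched, sizeof(patched)))
 *		return 0;	// write refused or faulted, see checks above
 */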
367 #define MAX_TRACE_PRINTK_VARARGS	3
368 #define BPF_TRACE_PRINTK_SIZE		1024
369 
370 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
371 	   u64, arg2, u64, arg3)
372 {
373 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
374 	struct bpf_bprintf_data data = {
375 		.get_bin_args	= true,
376 		.get_buf	= true,
377 	};
378 	int ret;
379 
380 	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
381 				  MAX_TRACE_PRINTK_VARARGS, &data);
382 	if (ret < 0)
383 		return ret;
384 
385 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
386 
387 	trace_bpf_trace_printk(data.buf);
388 
389 	bpf_bprintf_cleanup(&data);
390 
391 	return ret;
392 }
393 
394 static const struct bpf_func_proto bpf_trace_printk_proto = {
395 	.func		= bpf_trace_printk,
396 	.gpl_only	= true,
397 	.ret_type	= RET_INTEGER,
398 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
399 	.arg2_type	= ARG_CONST_SIZE,
400 };
401 
402 static void __set_printk_clr_event(void)
403 {
404 	/*
405 	 * This program might be calling bpf_trace_printk,
406 	 * so enable the associated bpf_trace/bpf_trace_printk event.
407 	 * Repeat this each time, as it is possible a user has
408 	 * disabled bpf_trace_printk events.  By loading a program
409 	 * that calls bpf_trace_printk(), however, the user has
410 	 * expressed the intent to see such events.
411 	 */
412 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
413 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
414 }
415 
416 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
417 {
418 	__set_printk_clr_event();
419 	return &bpf_trace_printk_proto;
420 }
421 
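/*
 * A minimal BPF-side sketch of bpf_trace_printk(): the format string, its
 * size, and up to MAX_TRACE_PRINTK_VARARGS arguments; output appears on the
 * bpf_trace/bpf_trace_printk event (e.g. via trace_pipe). pid and fd are
 * hypothetical.
 *
 *	char fmt[] = "pid %d opened fd %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), pid, fd);
 */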
422 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
423 	   u32, data_len)
424 {
425 	struct bpf_bprintf_data data = {
426 		.get_bin_args	= true,
427 		.get_buf	= true,
428 	};
429 	int ret, num_args;
430 
431 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
432 	    (data_len && !args))
433 		return -EINVAL;
434 	num_args = data_len / 8;
435 
436 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
437 	if (ret < 0)
438 		return ret;
439 
440 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
441 
442 	trace_bpf_trace_printk(data.buf);
443 
444 	bpf_bprintf_cleanup(&data);
445 
446 	return ret;
447 }
448 
449 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
450 	.func		= bpf_trace_vprintk,
451 	.gpl_only	= true,
452 	.ret_type	= RET_INTEGER,
453 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
454 	.arg2_type	= ARG_CONST_SIZE,
455 	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
456 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
457 };
458 
459 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
460 {
461 	__set_printk_clr_event();
462 	return &bpf_trace_vprintk_proto;
463 }
464 
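/*
 * A minimal BPF-side sketch of bpf_trace_vprintk(): arguments are passed as
 * an array of u64, so data_len must be a multiple of 8, matching the
 * (data_len & 7) check in bpf_trace_vprintk() above. pid, fd and ret are
 * hypothetical.
 *
 *	u64 args[] = { pid, fd, ret };
 *	char fmt[] = "pid %d fd %d ret %d\n";
 *
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */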
465 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
466 	   const void *, args, u32, data_len)
467 {
468 	struct bpf_bprintf_data data = {
469 		.get_bin_args	= true,
470 	};
471 	int err, num_args;
472 
473 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
474 	    (data_len && !args))
475 		return -EINVAL;
476 	num_args = data_len / 8;
477 
478 	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
479 	if (err < 0)
480 		return err;
481 
482 	seq_bprintf(m, fmt, data.bin_args);
483 
484 	bpf_bprintf_cleanup(&data);
485 
486 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
487 }
488 
489 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
490 
491 static const struct bpf_func_proto bpf_seq_printf_proto = {
492 	.func		= bpf_seq_printf,
493 	.gpl_only	= true,
494 	.ret_type	= RET_INTEGER,
495 	.arg1_type	= ARG_PTR_TO_BTF_ID,
496 	.arg1_btf_id	= &btf_seq_file_ids[0],
497 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
498 	.arg3_type	= ARG_CONST_SIZE,
499 	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
500 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
501 };
502 
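/*
 * A minimal BPF-side sketch of bpf_seq_printf() from a BPF iterator
 * program, taking the seq_file from the iterator context and packing the
 * arguments into a u64 array like bpf_trace_vprintk(). Assumes libbpf and
 * a vmlinux.h with the iterator context types.
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *m = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "%d %s\n";
 *		u64 args[2];
 *
 *		if (!task)
 *			return 0;
 *		args[0] = task->tgid;
 *		args[1] = (u64)task->comm;
 *		bpf_seq_printf(m, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */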
503 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
504 {
505 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
506 }
507 
508 static const struct bpf_func_proto bpf_seq_write_proto = {
509 	.func		= bpf_seq_write,
510 	.gpl_only	= true,
511 	.ret_type	= RET_INTEGER,
512 	.arg1_type	= ARG_PTR_TO_BTF_ID,
513 	.arg1_btf_id	= &btf_seq_file_ids[0],
514 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
515 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
516 };
517 
518 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
519 	   u32, btf_ptr_size, u64, flags)
520 {
521 	const struct btf *btf;
522 	s32 btf_id;
523 	int ret;
524 
525 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
526 	if (ret)
527 		return ret;
528 
529 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
530 }
531 
532 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
533 	.func		= bpf_seq_printf_btf,
534 	.gpl_only	= true,
535 	.ret_type	= RET_INTEGER,
536 	.arg1_type	= ARG_PTR_TO_BTF_ID,
537 	.arg1_btf_id	= &btf_seq_file_ids[0],
538 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
539 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
540 	.arg4_type	= ARG_ANYTHING,
541 };
542 
543 static __always_inline int
544 get_map_perf_counter(struct bpf_map *map, u64 flags,
545 		     u64 *value, u64 *enabled, u64 *running)
546 {
547 	struct bpf_array *array = container_of(map, struct bpf_array, map);
548 	unsigned int cpu = smp_processor_id();
549 	u64 index = flags & BPF_F_INDEX_MASK;
550 	struct bpf_event_entry *ee;
551 
552 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
553 		return -EINVAL;
554 	if (index == BPF_F_CURRENT_CPU)
555 		index = cpu;
556 	if (unlikely(index >= array->map.max_entries))
557 		return -E2BIG;
558 
559 	ee = READ_ONCE(array->ptrs[index]);
560 	if (!ee)
561 		return -ENOENT;
562 
563 	return perf_event_read_local(ee->event, value, enabled, running);
564 }
565 
566 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
567 {
568 	u64 value = 0;
569 	int err;
570 
571 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
572 	/*
573 	 * this API is ugly since we lose the [-22..-2] range of valid
574 	 * counter values, but that's uapi
575 	 */
576 	if (err)
577 		return err;
578 	return value;
579 }
580 
581 static const struct bpf_func_proto bpf_perf_event_read_proto = {
582 	.func		= bpf_perf_event_read,
583 	.gpl_only	= true,
584 	.ret_type	= RET_INTEGER,
585 	.arg1_type	= ARG_CONST_MAP_PTR,
586 	.arg2_type	= ARG_ANYTHING,
587 };
588 
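/*
 * A minimal BPF-side sketch of bpf_perf_event_read() against a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY slot for the current CPU. As the comment
 * above notes, values in [-22..-2] cannot be told apart from errors, which
 * is why bpf_perf_event_read_value() below is usually preferred. The
 * cycles_map name is hypothetical.
 *
 *	u64 cnt = bpf_perf_event_read(&cycles_map, BPF_F_CURRENT_CPU);
 */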
589 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
590 	   struct bpf_perf_event_value *, buf, u32, size)
591 {
592 	int err = -EINVAL;
593 
594 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
595 		goto clear;
596 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
597 				   &buf->running);
598 	if (unlikely(err))
599 		goto clear;
600 	return 0;
601 clear:
602 	memset(buf, 0, size);
603 	return err;
604 }
605 
606 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
607 	.func		= bpf_perf_event_read_value,
608 	.gpl_only	= true,
609 	.ret_type	= RET_INTEGER,
610 	.arg1_type	= ARG_CONST_MAP_PTR,
611 	.arg2_type	= ARG_ANYTHING,
612 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
613 	.arg4_type	= ARG_CONST_SIZE,
614 };
615 
616 static __always_inline u64
617 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
618 			u64 flags, struct perf_sample_data *sd)
619 {
620 	struct bpf_array *array = container_of(map, struct bpf_array, map);
621 	unsigned int cpu = smp_processor_id();
622 	u64 index = flags & BPF_F_INDEX_MASK;
623 	struct bpf_event_entry *ee;
624 	struct perf_event *event;
625 
626 	if (index == BPF_F_CURRENT_CPU)
627 		index = cpu;
628 	if (unlikely(index >= array->map.max_entries))
629 		return -E2BIG;
630 
631 	ee = READ_ONCE(array->ptrs[index]);
632 	if (!ee)
633 		return -ENOENT;
634 
635 	event = ee->event;
636 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
637 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
638 		return -EINVAL;
639 
640 	if (unlikely(event->oncpu != cpu))
641 		return -EOPNOTSUPP;
642 
643 	return perf_event_output(event, sd, regs);
644 }
645 
646 /*
647  * Support executing tracepoints in normal, irq, and nmi context, each of
648  * which may call bpf_perf_event_output
649  */
650 struct bpf_trace_sample_data {
651 	struct perf_sample_data sds[3];
652 };
653 
654 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
655 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
656 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
657 	   u64, flags, void *, data, u64, size)
658 {
659 	struct bpf_trace_sample_data *sds;
660 	struct perf_raw_record raw = {
661 		.frag = {
662 			.size = size,
663 			.data = data,
664 		},
665 	};
666 	struct perf_sample_data *sd;
667 	int nest_level, err;
668 
669 	preempt_disable();
670 	sds = this_cpu_ptr(&bpf_trace_sds);
671 	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
672 
673 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
674 		err = -EBUSY;
675 		goto out;
676 	}
677 
678 	sd = &sds->sds[nest_level - 1];
679 
680 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
681 		err = -EINVAL;
682 		goto out;
683 	}
684 
685 	perf_sample_data_init(sd, 0, 0);
686 	perf_sample_save_raw_data(sd, &raw);
687 
688 	err = __bpf_perf_event_output(regs, map, flags, sd);
689 out:
690 	this_cpu_dec(bpf_trace_nest_level);
691 	preempt_enable();
692 	return err;
693 }
694 
695 static const struct bpf_func_proto bpf_perf_event_output_proto = {
696 	.func		= bpf_perf_event_output,
697 	.gpl_only	= true,
698 	.ret_type	= RET_INTEGER,
699 	.arg1_type	= ARG_PTR_TO_CTX,
700 	.arg2_type	= ARG_CONST_MAP_PTR,
701 	.arg3_type	= ARG_ANYTHING,
702 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
703 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
704 };
705 
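/*
 * A minimal BPF-side sketch of bpf_perf_event_output(): emit a sample into
 * the current CPU's ring of a BPF_MAP_TYPE_PERF_EVENT_ARRAY, matching the
 * BPF_F_CURRENT_CPU index handling above. The events map and fields are
 * hypothetical.
 *
 *	struct event { u32 pid; u32 fd; } e = { .pid = pid, .fd = fd };
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 */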
706 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
707 struct bpf_nested_pt_regs {
708 	struct pt_regs regs[3];
709 };
710 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
711 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
712 
713 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
714 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
715 {
716 	struct perf_raw_frag frag = {
717 		.copy		= ctx_copy,
718 		.size		= ctx_size,
719 		.data		= ctx,
720 	};
721 	struct perf_raw_record raw = {
722 		.frag = {
723 			{
724 				.next	= ctx_size ? &frag : NULL,
725 			},
726 			.size	= meta_size,
727 			.data	= meta,
728 		},
729 	};
730 	struct perf_sample_data *sd;
731 	struct pt_regs *regs;
732 	int nest_level;
733 	u64 ret;
734 
735 	preempt_disable();
736 	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
737 
738 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
739 		ret = -EBUSY;
740 		goto out;
741 	}
742 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
743 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
744 
745 	perf_fetch_caller_regs(regs);
746 	perf_sample_data_init(sd, 0, 0);
747 	perf_sample_save_raw_data(sd, &raw);
748 
749 	ret = __bpf_perf_event_output(regs, map, flags, sd);
750 out:
751 	this_cpu_dec(bpf_event_output_nest_level);
752 	preempt_enable();
753 	return ret;
754 }
755 
756 BPF_CALL_0(bpf_get_current_task)
757 {
758 	return (long) current;
759 }
760 
761 const struct bpf_func_proto bpf_get_current_task_proto = {
762 	.func		= bpf_get_current_task,
763 	.gpl_only	= true,
764 	.ret_type	= RET_INTEGER,
765 };
766 
767 BPF_CALL_0(bpf_get_current_task_btf)
768 {
769 	return (unsigned long) current;
770 }
771 
772 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
773 	.func		= bpf_get_current_task_btf,
774 	.gpl_only	= true,
775 	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
776 	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
777 };
778 
779 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
780 {
781 	return (unsigned long) task_pt_regs(task);
782 }
783 
784 BTF_ID_LIST(bpf_task_pt_regs_ids)
785 BTF_ID(struct, pt_regs)
786 
787 const struct bpf_func_proto bpf_task_pt_regs_proto = {
788 	.func		= bpf_task_pt_regs,
789 	.gpl_only	= true,
790 	.arg1_type	= ARG_PTR_TO_BTF_ID,
791 	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
792 	.ret_type	= RET_PTR_TO_BTF_ID,
793 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
794 };
795 
796 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
797 {
798 	struct bpf_array *array = container_of(map, struct bpf_array, map);
799 	struct cgroup *cgrp;
800 
801 	if (unlikely(idx >= array->map.max_entries))
802 		return -E2BIG;
803 
804 	cgrp = READ_ONCE(array->ptrs[idx]);
805 	if (unlikely(!cgrp))
806 		return -EAGAIN;
807 
808 	return task_under_cgroup_hierarchy(current, cgrp);
809 }
810 
811 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
812 	.func           = bpf_current_task_under_cgroup,
813 	.gpl_only       = false,
814 	.ret_type       = RET_INTEGER,
815 	.arg1_type      = ARG_CONST_MAP_PTR,
816 	.arg2_type      = ARG_ANYTHING,
817 };
818 
819 struct send_signal_irq_work {
820 	struct irq_work irq_work;
821 	struct task_struct *task;
822 	u32 sig;
823 	enum pid_type type;
824 };
825 
826 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
827 
828 static void do_bpf_send_signal(struct irq_work *entry)
829 {
830 	struct send_signal_irq_work *work;
831 
832 	work = container_of(entry, struct send_signal_irq_work, irq_work);
833 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
834 	put_task_struct(work->task);
835 }
836 
837 static int bpf_send_signal_common(u32 sig, enum pid_type type)
838 {
839 	struct send_signal_irq_work *work = NULL;
840 
841 	/* Similar to bpf_probe_write_user, the task needs to be
842 	 * in a sound condition and kernel memory access must be
843 	 * permitted in order to send a signal to the current
844 	 * task.
845 	 */
846 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
847 		return -EPERM;
848 	if (unlikely(!nmi_uaccess_okay()))
849 		return -EPERM;
850 	/* The task must not be init (pid 1), to avoid a kernel panic. */
851 	if (unlikely(is_global_init(current)))
852 		return -EPERM;
853 
854 	if (irqs_disabled()) {
855 		/* Do an early check on signal validity. Otherwise,
856 		 * the error is lost in deferred irq_work.
857 		 */
858 		if (unlikely(!valid_signal(sig)))
859 			return -EINVAL;
860 
861 		work = this_cpu_ptr(&send_signal_work);
862 		if (irq_work_is_busy(&work->irq_work))
863 			return -EBUSY;
864 
865 		/* Add the current task, which is the target of the signal,
866 		 * to the irq_work. The current task may have changed by the
867 		 * time the queued irq_work is executed.
868 		 */
869 		work->task = get_task_struct(current);
870 		work->sig = sig;
871 		work->type = type;
872 		irq_work_queue(&work->irq_work);
873 		return 0;
874 	}
875 
876 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
877 }
878 
879 BPF_CALL_1(bpf_send_signal, u32, sig)
880 {
881 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
882 }
883 
884 static const struct bpf_func_proto bpf_send_signal_proto = {
885 	.func		= bpf_send_signal,
886 	.gpl_only	= false,
887 	.ret_type	= RET_INTEGER,
888 	.arg1_type	= ARG_ANYTHING,
889 };
890 
891 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
892 {
893 	return bpf_send_signal_common(sig, PIDTYPE_PID);
894 }
895 
896 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
897 	.func		= bpf_send_signal_thread,
898 	.gpl_only	= false,
899 	.ret_type	= RET_INTEGER,
900 	.arg1_type	= ARG_ANYTHING,
901 };
902 
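/*
 * A minimal BPF-side sketch: bpf_send_signal() signals the current process
 * as a whole (PIDTYPE_TGID), bpf_send_signal_thread() only the current
 * thread (PIDTYPE_PID).
 *
 *	if (bpf_send_signal(SIGUSR1))
 *		return 0;	// refused, see the checks in bpf_send_signal_common()
 */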
903 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
904 {
905 	struct path copy;
906 	long len;
907 	char *p;
908 
909 	if (!sz)
910 		return 0;
911 
912 	/*
913 	 * The path pointer is verified as trusted and safe to use,
914 	 * but let's double-check it's valid anyway to work around a
915 	 * potentially broken verifier.
916 	 */
917 	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
918 	if (len < 0)
919 		return len;
920 
921 	p = d_path(&copy, buf, sz);
922 	if (IS_ERR(p)) {
923 		len = PTR_ERR(p);
924 	} else {
925 		len = buf + sz - p;
926 		memmove(buf, p, len);
927 	}
928 
929 	return len;
930 }
931 
932 BTF_SET_START(btf_allowlist_d_path)
933 #ifdef CONFIG_SECURITY
934 BTF_ID(func, security_file_permission)
935 BTF_ID(func, security_inode_getattr)
936 BTF_ID(func, security_file_open)
937 #endif
938 #ifdef CONFIG_SECURITY_PATH
939 BTF_ID(func, security_path_truncate)
940 #endif
941 BTF_ID(func, vfs_truncate)
942 BTF_ID(func, vfs_fallocate)
943 BTF_ID(func, dentry_open)
944 BTF_ID(func, vfs_getattr)
945 BTF_ID(func, filp_close)
946 BTF_SET_END(btf_allowlist_d_path)
947 
948 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
949 {
950 	if (prog->type == BPF_PROG_TYPE_TRACING &&
951 	    prog->expected_attach_type == BPF_TRACE_ITER)
952 		return true;
953 
954 	if (prog->type == BPF_PROG_TYPE_LSM)
955 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
956 
957 	return btf_id_set_contains(&btf_allowlist_d_path,
958 				   prog->aux->attach_btf_id);
959 }
960 
961 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
962 
963 static const struct bpf_func_proto bpf_d_path_proto = {
964 	.func		= bpf_d_path,
965 	.gpl_only	= false,
966 	.ret_type	= RET_INTEGER,
967 	.arg1_type	= ARG_PTR_TO_BTF_ID,
968 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
969 	.arg2_type	= ARG_PTR_TO_MEM,
970 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
971 	.allowed	= bpf_d_path_allowed,
972 };
973 
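/*
 * A minimal BPF-side sketch of bpf_d_path() from one of the hooks admitted
 * by bpf_d_path_allowed() above, here an fentry program on vfs_truncate.
 * Assumes libbpf's BPF_PROG() macro; the buffer size is arbitrary.
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(on_truncate, const struct path *path)
 *	{
 *		char buf[256];
 *		long n = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *
 *		if (n > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */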
974 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
975 			 BTF_F_PTR_RAW | BTF_F_ZERO)
976 
977 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
978 				  u64 flags, const struct btf **btf,
979 				  s32 *btf_id)
980 {
981 	const struct btf_type *t;
982 
983 	if (unlikely(flags & ~(BTF_F_ALL)))
984 		return -EINVAL;
985 
986 	if (btf_ptr_size != sizeof(struct btf_ptr))
987 		return -EINVAL;
988 
989 	*btf = bpf_get_btf_vmlinux();
990 
991 	if (IS_ERR_OR_NULL(*btf))
992 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
993 
994 	if (ptr->type_id > 0)
995 		*btf_id = ptr->type_id;
996 	else
997 		return -EINVAL;
998 
999 	if (*btf_id > 0)
1000 		t = btf_type_by_id(*btf, *btf_id);
1001 	if (*btf_id <= 0 || !t)
1002 		return -ENOENT;
1003 
1004 	return 0;
1005 }
1006 
1007 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1008 	   u32, btf_ptr_size, u64, flags)
1009 {
1010 	const struct btf *btf;
1011 	s32 btf_id;
1012 	int ret;
1013 
1014 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1015 	if (ret)
1016 		return ret;
1017 
1018 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1019 				      flags);
1020 }
1021 
1022 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1023 	.func		= bpf_snprintf_btf,
1024 	.gpl_only	= false,
1025 	.ret_type	= RET_INTEGER,
1026 	.arg1_type	= ARG_PTR_TO_MEM,
1027 	.arg2_type	= ARG_CONST_SIZE,
1028 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1029 	.arg4_type	= ARG_CONST_SIZE,
1030 	.arg5_type	= ARG_ANYTHING,
1031 };
1032 
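/*
 * A minimal BPF-side sketch of bpf_snprintf_btf(): pretty-print a kernel
 * object through its BTF type id via struct btf_ptr, which
 * bpf_btf_printf_prepare() above validates. Assumes libbpf's
 * bpf_core_type_id_kernel(); task and the buffer size are hypothetical.
 *
 *	char out[512];
 *	struct btf_ptr p = {
 *		.ptr	 = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_PTR_RAW);
 */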
1033 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1034 {
1035 	/* This helper call is inlined by verifier. */
1036 	return ((u64 *)ctx)[-2];
1037 }
1038 
1039 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1040 	.func		= bpf_get_func_ip_tracing,
1041 	.gpl_only	= true,
1042 	.ret_type	= RET_INTEGER,
1043 	.arg1_type	= ARG_PTR_TO_CTX,
1044 };
1045 
1046 #ifdef CONFIG_X86_KERNEL_IBT
1047 static unsigned long get_entry_ip(unsigned long fentry_ip)
1048 {
1049 	u32 instr;
1050 
1051 	/* Be extra safe here in case the entry ip is on a page edge. */
1052 	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1053 		return fentry_ip;
1054 	if (is_endbr(instr))
1055 		fentry_ip -= ENDBR_INSN_SIZE;
1056 	return fentry_ip;
1057 }
1058 #else
1059 #define get_entry_ip(fentry_ip) fentry_ip
1060 #endif
1061 
1062 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1063 {
1064 	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1065 	struct kprobe *kp;
1066 
1067 #ifdef CONFIG_UPROBES
1068 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1069 	if (run_ctx->is_uprobe)
1070 		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1071 #endif
1072 
1073 	kp = kprobe_running();
1074 
1075 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1076 		return 0;
1077 
1078 	return get_entry_ip((uintptr_t)kp->addr);
1079 }
1080 
1081 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1082 	.func		= bpf_get_func_ip_kprobe,
1083 	.gpl_only	= true,
1084 	.ret_type	= RET_INTEGER,
1085 	.arg1_type	= ARG_PTR_TO_CTX,
1086 };
1087 
1088 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1089 {
1090 	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1091 }
1092 
1093 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1094 	.func		= bpf_get_func_ip_kprobe_multi,
1095 	.gpl_only	= false,
1096 	.ret_type	= RET_INTEGER,
1097 	.arg1_type	= ARG_PTR_TO_CTX,
1098 };
1099 
1100 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1101 {
1102 	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1103 }
1104 
1105 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1106 	.func		= bpf_get_attach_cookie_kprobe_multi,
1107 	.gpl_only	= false,
1108 	.ret_type	= RET_INTEGER,
1109 	.arg1_type	= ARG_PTR_TO_CTX,
1110 };
1111 
1112 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1113 {
1114 	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1115 }
1116 
1117 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1118 	.func		= bpf_get_func_ip_uprobe_multi,
1119 	.gpl_only	= false,
1120 	.ret_type	= RET_INTEGER,
1121 	.arg1_type	= ARG_PTR_TO_CTX,
1122 };
1123 
1124 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1125 {
1126 	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1127 }
1128 
1129 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1130 	.func		= bpf_get_attach_cookie_uprobe_multi,
1131 	.gpl_only	= false,
1132 	.ret_type	= RET_INTEGER,
1133 	.arg1_type	= ARG_PTR_TO_CTX,
1134 };
1135 
1136 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1137 {
1138 	struct bpf_trace_run_ctx *run_ctx;
1139 
1140 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1141 	return run_ctx->bpf_cookie;
1142 }
1143 
1144 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1145 	.func		= bpf_get_attach_cookie_trace,
1146 	.gpl_only	= false,
1147 	.ret_type	= RET_INTEGER,
1148 	.arg1_type	= ARG_PTR_TO_CTX,
1149 };
1150 
1151 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1152 {
1153 	return ctx->event->bpf_cookie;
1154 }
1155 
1156 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1157 	.func		= bpf_get_attach_cookie_pe,
1158 	.gpl_only	= false,
1159 	.ret_type	= RET_INTEGER,
1160 	.arg1_type	= ARG_PTR_TO_CTX,
1161 };
1162 
1163 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1164 {
1165 	struct bpf_trace_run_ctx *run_ctx;
1166 
1167 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1168 	return run_ctx->bpf_cookie;
1169 }
1170 
1171 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1172 	.func		= bpf_get_attach_cookie_tracing,
1173 	.gpl_only	= false,
1174 	.ret_type	= RET_INTEGER,
1175 	.arg1_type	= ARG_PTR_TO_CTX,
1176 };
1177 
1178 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1179 {
1180 #ifndef CONFIG_X86
1181 	return -ENOENT;
1182 #else
1183 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1184 	u32 entry_cnt = size / br_entry_size;
1185 
1186 	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1187 
1188 	if (unlikely(flags))
1189 		return -EINVAL;
1190 
1191 	if (!entry_cnt)
1192 		return -ENOENT;
1193 
1194 	return entry_cnt * br_entry_size;
1195 #endif
1196 }
1197 
1198 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1199 	.func		= bpf_get_branch_snapshot,
1200 	.gpl_only	= true,
1201 	.ret_type	= RET_INTEGER,
1202 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1203 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1204 };
1205 
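/*
 * A minimal BPF-side sketch of bpf_get_branch_snapshot(): the buffer is
 * sized in whole struct perf_branch_entry records and the helper returns
 * the number of bytes it filled. The consumer is hypothetical.
 *
 *	struct perf_branch_entry entries[16];
 *	long sz = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
 *
 *	if (sz > 0)
 *		consume(entries, sz / sizeof(entries[0]));
 */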
1206 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1207 {
1208 	/* This helper call is inlined by verifier. */
1209 	u64 nr_args = ((u64 *)ctx)[-1];
1210 
1211 	if ((u64) n >= nr_args)
1212 		return -EINVAL;
1213 	*value = ((u64 *)ctx)[n];
1214 	return 0;
1215 }
1216 
1217 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1218 	.func		= get_func_arg,
1219 	.ret_type	= RET_INTEGER,
1220 	.arg1_type	= ARG_PTR_TO_CTX,
1221 	.arg2_type	= ARG_ANYTHING,
1222 	.arg3_type	= ARG_PTR_TO_LONG,
1223 };
1224 
1225 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1226 {
1227 	/* This helper call is inlined by verifier. */
1228 	u64 nr_args = ((u64 *)ctx)[-1];
1229 
1230 	*value = ((u64 *)ctx)[nr_args];
1231 	return 0;
1232 }
1233 
1234 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1235 	.func		= get_func_ret,
1236 	.ret_type	= RET_INTEGER,
1237 	.arg1_type	= ARG_PTR_TO_CTX,
1238 	.arg2_type	= ARG_PTR_TO_LONG,
1239 };
1240 
1241 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1242 {
1243 	/* This helper call is inlined by verifier. */
1244 	return ((u64 *)ctx)[-1];
1245 }
1246 
1247 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1248 	.func		= get_func_arg_cnt,
1249 	.ret_type	= RET_INTEGER,
1250 	.arg1_type	= ARG_PTR_TO_CTX,
1251 };
1252 
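/*
 * A minimal BPF-side sketch of the three helpers above from an fexit
 * program; the trampoline lays the typed argument and return slots out in
 * ctx, which is what the [-1]/[n]/[nr_args] indexing reads. Assumes
 * libbpf's BPF_PROG(); the probed function is just an example.
 *
 *	SEC("fexit/do_unlinkat")
 *	int BPF_PROG(on_unlinkat)
 *	{
 *		u64 nr = bpf_get_func_arg_cnt(ctx);
 *		u64 dfd, ret;
 *
 *		if (nr >= 1)
 *			bpf_get_func_arg(ctx, 0, &dfd);
 *		bpf_get_func_ret(ctx, &ret);
 *		return 0;
 *	}
 */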
1253 #ifdef CONFIG_KEYS
1254 __diag_push();
1255 __diag_ignore_all("-Wmissing-prototypes",
1256 		  "kfuncs which will be used in BPF programs");
1257 
1258 /**
1259  * bpf_lookup_user_key - lookup a key by its serial
1260  * @serial: key handle serial number
1261  * @flags: lookup-specific flags
1262  *
1263  * Search a key with a given *serial* and the provided *flags*.
1264  * If found, increment the reference count of the key by one, and
1265  * return it in the bpf_key structure.
1266  *
1267  * The bpf_key structure must be passed to bpf_key_put() when done
1268  * with it, so that the key reference count is decremented and the
1269  * bpf_key structure is freed.
1270  *
1271  * Permission checks are deferred to the time the key is used by
1272  * one of the available key-specific kfuncs.
1273  *
1274  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1275  * special keyring (e.g. session keyring), if it doesn't yet exist.
1276  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1277  * for the key construction, and to retrieve uninstantiated keys (keys
1278  * without data attached to them).
1279  *
1280  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1281  *         NULL pointer otherwise.
1282  */
1283 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1284 {
1285 	key_ref_t key_ref;
1286 	struct bpf_key *bkey;
1287 
1288 	if (flags & ~KEY_LOOKUP_ALL)
1289 		return NULL;
1290 
1291 	/*
1292 	 * Permission check is deferred until the key is used, as the
1293 	 * intent of the caller is unknown here.
1294 	 */
1295 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1296 	if (IS_ERR(key_ref))
1297 		return NULL;
1298 
1299 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1300 	if (!bkey) {
1301 		key_put(key_ref_to_ptr(key_ref));
1302 		return NULL;
1303 	}
1304 
1305 	bkey->key = key_ref_to_ptr(key_ref);
1306 	bkey->has_ref = true;
1307 
1308 	return bkey;
1309 }
1310 
1311 /**
1312  * bpf_lookup_system_key - lookup a key by a system-defined ID
1313  * @id: key ID
1314  *
1315  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1316  * The key pointer is marked as invalid, to prevent bpf_key_put() from
1317  * attempting to decrement the key reference count on that pointer. The key
1318  * pointer set in such a way is currently understood only by
1319  * verify_pkcs7_signature().
1320  *
1321  * Set *id* to one of the values defined in include/linux/verification.h:
1322  * 0 for the primary keyring (immutable keyring of system keys);
1323  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1324  * (where keys can be added only if they are vouched for by existing keys
1325  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1326  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1327  * kerned image and, possibly, the initramfs signature).
1328  * kernel image and, possibly, the initramfs signature).
1329  * Return: a bpf_key pointer with an invalid key pointer set from the
1330  *         pre-determined ID on success, a NULL pointer otherwise
1331  */
1332 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1333 {
1334 	struct bpf_key *bkey;
1335 
1336 	if (system_keyring_id_check(id) < 0)
1337 		return NULL;
1338 
1339 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1340 	if (!bkey)
1341 		return NULL;
1342 
1343 	bkey->key = (struct key *)(unsigned long)id;
1344 	bkey->has_ref = false;
1345 
1346 	return bkey;
1347 }
1348 
1349 /**
1350  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1351  * @bkey: bpf_key structure
1352  *
1353  * Decrement the reference count of the key inside *bkey*, if the pointer
1354  * is valid, and free *bkey*.
1355  */
1356 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1357 {
1358 	if (bkey->has_ref)
1359 		key_put(bkey->key);
1360 
1361 	kfree(bkey);
1362 }
1363 
1364 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1365 /**
1366  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1367  * @data_ptr: data to verify
1368  * @sig_ptr: signature of the data
1369  * @trusted_keyring: keyring with keys trusted for signature verification
1370  *
1371  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1372  * with keys in a keyring referenced by *trusted_keyring*.
1373  *
1374  * Return: 0 on success, a negative value on error.
1375  */
1376 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1377 			       struct bpf_dynptr_kern *sig_ptr,
1378 			       struct bpf_key *trusted_keyring)
1379 {
1380 	int ret;
1381 
1382 	if (trusted_keyring->has_ref) {
1383 		/*
1384 		 * Do the permission check deferred in bpf_lookup_user_key().
1385 		 * See bpf_lookup_user_key() for more details.
1386 		 *
1387 		 * A call to key_task_permission() here would be redundant, as
1388 		 * it is already done by keyring_search() called by
1389 		 * find_asymmetric_key().
1390 		 */
1391 		ret = key_validate(trusted_keyring->key);
1392 		if (ret < 0)
1393 			return ret;
1394 	}
1395 
1396 	return verify_pkcs7_signature(data_ptr->data,
1397 				      __bpf_dynptr_size(data_ptr),
1398 				      sig_ptr->data,
1399 				      __bpf_dynptr_size(sig_ptr),
1400 				      trusted_keyring->key,
1401 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1402 				      NULL);
1403 }
1404 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1405 
1406 __diag_pop();
1407 
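/*
 * A minimal BPF-side sketch of the key kfuncs registered below: look up a
 * keyring, verify a PKCS#7 signature held in dynptrs, then drop the
 * reference. Meant for a sleepable program; serial, data_ptr and sig_ptr
 * setup are hypothetical.
 *
 *	struct bpf_key *kr = bpf_lookup_user_key(serial, 0);
 *	int err;
 *
 *	if (!kr)
 *		return 0;
 *	err = bpf_verify_pkcs7_signature(&data_ptr, &sig_ptr, kr);
 *	bpf_key_put(kr);
 */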
1408 BTF_SET8_START(key_sig_kfunc_set)
1409 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1410 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1411 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1412 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1413 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1414 #endif
1415 BTF_SET8_END(key_sig_kfunc_set)
1416 
1417 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1418 	.owner = THIS_MODULE,
1419 	.set = &key_sig_kfunc_set,
1420 };
1421 
1422 static int __init bpf_key_sig_kfuncs_init(void)
1423 {
1424 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1425 					 &bpf_key_sig_kfunc_set);
1426 }
1427 
1428 late_initcall(bpf_key_sig_kfuncs_init);
1429 #endif /* CONFIG_KEYS */
1430 
1431 static const struct bpf_func_proto *
1432 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1433 {
1434 	switch (func_id) {
1435 	case BPF_FUNC_map_lookup_elem:
1436 		return &bpf_map_lookup_elem_proto;
1437 	case BPF_FUNC_map_update_elem:
1438 		return &bpf_map_update_elem_proto;
1439 	case BPF_FUNC_map_delete_elem:
1440 		return &bpf_map_delete_elem_proto;
1441 	case BPF_FUNC_map_push_elem:
1442 		return &bpf_map_push_elem_proto;
1443 	case BPF_FUNC_map_pop_elem:
1444 		return &bpf_map_pop_elem_proto;
1445 	case BPF_FUNC_map_peek_elem:
1446 		return &bpf_map_peek_elem_proto;
1447 	case BPF_FUNC_map_lookup_percpu_elem:
1448 		return &bpf_map_lookup_percpu_elem_proto;
1449 	case BPF_FUNC_ktime_get_ns:
1450 		return &bpf_ktime_get_ns_proto;
1451 	case BPF_FUNC_ktime_get_boot_ns:
1452 		return &bpf_ktime_get_boot_ns_proto;
1453 	case BPF_FUNC_tail_call:
1454 		return &bpf_tail_call_proto;
1455 	case BPF_FUNC_get_current_pid_tgid:
1456 		return &bpf_get_current_pid_tgid_proto;
1457 	case BPF_FUNC_get_current_task:
1458 		return &bpf_get_current_task_proto;
1459 	case BPF_FUNC_get_current_task_btf:
1460 		return &bpf_get_current_task_btf_proto;
1461 	case BPF_FUNC_task_pt_regs:
1462 		return &bpf_task_pt_regs_proto;
1463 	case BPF_FUNC_get_current_uid_gid:
1464 		return &bpf_get_current_uid_gid_proto;
1465 	case BPF_FUNC_get_current_comm:
1466 		return &bpf_get_current_comm_proto;
1467 	case BPF_FUNC_trace_printk:
1468 		return bpf_get_trace_printk_proto();
1469 	case BPF_FUNC_get_smp_processor_id:
1470 		return &bpf_get_smp_processor_id_proto;
1471 	case BPF_FUNC_get_numa_node_id:
1472 		return &bpf_get_numa_node_id_proto;
1473 	case BPF_FUNC_perf_event_read:
1474 		return &bpf_perf_event_read_proto;
1475 	case BPF_FUNC_current_task_under_cgroup:
1476 		return &bpf_current_task_under_cgroup_proto;
1477 	case BPF_FUNC_get_prandom_u32:
1478 		return &bpf_get_prandom_u32_proto;
1479 	case BPF_FUNC_probe_write_user:
1480 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1481 		       NULL : bpf_get_probe_write_proto();
1482 	case BPF_FUNC_probe_read_user:
1483 		return &bpf_probe_read_user_proto;
1484 	case BPF_FUNC_probe_read_kernel:
1485 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1486 		       NULL : &bpf_probe_read_kernel_proto;
1487 	case BPF_FUNC_probe_read_user_str:
1488 		return &bpf_probe_read_user_str_proto;
1489 	case BPF_FUNC_probe_read_kernel_str:
1490 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1491 		       NULL : &bpf_probe_read_kernel_str_proto;
1492 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1493 	case BPF_FUNC_probe_read:
1494 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1495 		       NULL : &bpf_probe_read_compat_proto;
1496 	case BPF_FUNC_probe_read_str:
1497 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1498 		       NULL : &bpf_probe_read_compat_str_proto;
1499 #endif
1500 #ifdef CONFIG_CGROUPS
1501 	case BPF_FUNC_cgrp_storage_get:
1502 		return &bpf_cgrp_storage_get_proto;
1503 	case BPF_FUNC_cgrp_storage_delete:
1504 		return &bpf_cgrp_storage_delete_proto;
1505 #endif
1506 	case BPF_FUNC_send_signal:
1507 		return &bpf_send_signal_proto;
1508 	case BPF_FUNC_send_signal_thread:
1509 		return &bpf_send_signal_thread_proto;
1510 	case BPF_FUNC_perf_event_read_value:
1511 		return &bpf_perf_event_read_value_proto;
1512 	case BPF_FUNC_get_ns_current_pid_tgid:
1513 		return &bpf_get_ns_current_pid_tgid_proto;
1514 	case BPF_FUNC_ringbuf_output:
1515 		return &bpf_ringbuf_output_proto;
1516 	case BPF_FUNC_ringbuf_reserve:
1517 		return &bpf_ringbuf_reserve_proto;
1518 	case BPF_FUNC_ringbuf_submit:
1519 		return &bpf_ringbuf_submit_proto;
1520 	case BPF_FUNC_ringbuf_discard:
1521 		return &bpf_ringbuf_discard_proto;
1522 	case BPF_FUNC_ringbuf_query:
1523 		return &bpf_ringbuf_query_proto;
1524 	case BPF_FUNC_jiffies64:
1525 		return &bpf_jiffies64_proto;
1526 	case BPF_FUNC_get_task_stack:
1527 		return &bpf_get_task_stack_proto;
1528 	case BPF_FUNC_copy_from_user:
1529 		return &bpf_copy_from_user_proto;
1530 	case BPF_FUNC_copy_from_user_task:
1531 		return &bpf_copy_from_user_task_proto;
1532 	case BPF_FUNC_snprintf_btf:
1533 		return &bpf_snprintf_btf_proto;
1534 	case BPF_FUNC_per_cpu_ptr:
1535 		return &bpf_per_cpu_ptr_proto;
1536 	case BPF_FUNC_this_cpu_ptr:
1537 		return &bpf_this_cpu_ptr_proto;
1538 	case BPF_FUNC_task_storage_get:
1539 		if (bpf_prog_check_recur(prog))
1540 			return &bpf_task_storage_get_recur_proto;
1541 		return &bpf_task_storage_get_proto;
1542 	case BPF_FUNC_task_storage_delete:
1543 		if (bpf_prog_check_recur(prog))
1544 			return &bpf_task_storage_delete_recur_proto;
1545 		return &bpf_task_storage_delete_proto;
1546 	case BPF_FUNC_for_each_map_elem:
1547 		return &bpf_for_each_map_elem_proto;
1548 	case BPF_FUNC_snprintf:
1549 		return &bpf_snprintf_proto;
1550 	case BPF_FUNC_get_func_ip:
1551 		return &bpf_get_func_ip_proto_tracing;
1552 	case BPF_FUNC_get_branch_snapshot:
1553 		return &bpf_get_branch_snapshot_proto;
1554 	case BPF_FUNC_find_vma:
1555 		return &bpf_find_vma_proto;
1556 	case BPF_FUNC_trace_vprintk:
1557 		return bpf_get_trace_vprintk_proto();
1558 	default:
1559 		return bpf_base_func_proto(func_id);
1560 	}
1561 }
1562 
1563 static const struct bpf_func_proto *
1564 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1565 {
1566 	switch (func_id) {
1567 	case BPF_FUNC_perf_event_output:
1568 		return &bpf_perf_event_output_proto;
1569 	case BPF_FUNC_get_stackid:
1570 		return &bpf_get_stackid_proto;
1571 	case BPF_FUNC_get_stack:
1572 		return &bpf_get_stack_proto;
1573 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1574 	case BPF_FUNC_override_return:
1575 		return &bpf_override_return_proto;
1576 #endif
1577 	case BPF_FUNC_get_func_ip:
1578 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1579 			return &bpf_get_func_ip_proto_kprobe_multi;
1580 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1581 			return &bpf_get_func_ip_proto_uprobe_multi;
1582 		return &bpf_get_func_ip_proto_kprobe;
1583 	case BPF_FUNC_get_attach_cookie:
1584 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1585 			return &bpf_get_attach_cookie_proto_kmulti;
1586 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1587 			return &bpf_get_attach_cookie_proto_umulti;
1588 		return &bpf_get_attach_cookie_proto_trace;
1589 	default:
1590 		return bpf_tracing_func_proto(func_id, prog);
1591 	}
1592 }
1593 
1594 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1595 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1596 					const struct bpf_prog *prog,
1597 					struct bpf_insn_access_aux *info)
1598 {
1599 	if (off < 0 || off >= sizeof(struct pt_regs))
1600 		return false;
1601 	if (type != BPF_READ)
1602 		return false;
1603 	if (off % size != 0)
1604 		return false;
1605 	/*
1606 	 * Assertion for 32 bit to make sure last 8 byte access
1607 	 * (BPF_DW) to the last 4 byte member is disallowed.
1608 	 */
1609 	if (off + size > sizeof(struct pt_regs))
1610 		return false;
1611 
1612 	return true;
1613 }
1614 
1615 const struct bpf_verifier_ops kprobe_verifier_ops = {
1616 	.get_func_proto  = kprobe_prog_func_proto,
1617 	.is_valid_access = kprobe_prog_is_valid_access,
1618 };
1619 
1620 const struct bpf_prog_ops kprobe_prog_ops = {
1621 };
1622 
1623 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1624 	   u64, flags, void *, data, u64, size)
1625 {
1626 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1627 
1628 	/*
1629 	 * r1 points to the perf tracepoint buffer, where the first 8 bytes are
1630 	 * hidden from the bpf program and contain a pointer to 'struct pt_regs'.
1631 	 * Fetch it from there and call the same bpf_perf_event_output() helper inline.
1632 	 */
1633 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1634 }
1635 
1636 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1637 	.func		= bpf_perf_event_output_tp,
1638 	.gpl_only	= true,
1639 	.ret_type	= RET_INTEGER,
1640 	.arg1_type	= ARG_PTR_TO_CTX,
1641 	.arg2_type	= ARG_CONST_MAP_PTR,
1642 	.arg3_type	= ARG_ANYTHING,
1643 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1644 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1645 };
1646 
1647 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1648 	   u64, flags)
1649 {
1650 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1651 
1652 	/*
1653 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1654 	 * the other helper's function body cannot be inlined due to being
1655 	 * external, thus we need to call the raw helper function.
1656 	 */
1657 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1658 			       flags, 0, 0);
1659 }
1660 
1661 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1662 	.func		= bpf_get_stackid_tp,
1663 	.gpl_only	= true,
1664 	.ret_type	= RET_INTEGER,
1665 	.arg1_type	= ARG_PTR_TO_CTX,
1666 	.arg2_type	= ARG_CONST_MAP_PTR,
1667 	.arg3_type	= ARG_ANYTHING,
1668 };
1669 
1670 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1671 	   u64, flags)
1672 {
1673 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1674 
1675 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1676 			     (unsigned long) size, flags, 0);
1677 }
1678 
1679 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1680 	.func		= bpf_get_stack_tp,
1681 	.gpl_only	= true,
1682 	.ret_type	= RET_INTEGER,
1683 	.arg1_type	= ARG_PTR_TO_CTX,
1684 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1685 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1686 	.arg4_type	= ARG_ANYTHING,
1687 };
1688 
1689 static const struct bpf_func_proto *
1690 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1691 {
1692 	switch (func_id) {
1693 	case BPF_FUNC_perf_event_output:
1694 		return &bpf_perf_event_output_proto_tp;
1695 	case BPF_FUNC_get_stackid:
1696 		return &bpf_get_stackid_proto_tp;
1697 	case BPF_FUNC_get_stack:
1698 		return &bpf_get_stack_proto_tp;
1699 	case BPF_FUNC_get_attach_cookie:
1700 		return &bpf_get_attach_cookie_proto_trace;
1701 	default:
1702 		return bpf_tracing_func_proto(func_id, prog);
1703 	}
1704 }
1705 
1706 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1707 				    const struct bpf_prog *prog,
1708 				    struct bpf_insn_access_aux *info)
1709 {
1710 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1711 		return false;
1712 	if (type != BPF_READ)
1713 		return false;
1714 	if (off % size != 0)
1715 		return false;
1716 
1717 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1718 	return true;
1719 }
1720 
1721 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1722 	.get_func_proto  = tp_prog_func_proto,
1723 	.is_valid_access = tp_prog_is_valid_access,
1724 };
1725 
1726 const struct bpf_prog_ops tracepoint_prog_ops = {
1727 };
1728 
1729 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1730 	   struct bpf_perf_event_value *, buf, u32, size)
1731 {
1732 	int err = -EINVAL;
1733 
1734 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1735 		goto clear;
1736 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1737 				    &buf->running);
1738 	if (unlikely(err))
1739 		goto clear;
1740 	return 0;
1741 clear:
1742 	memset(buf, 0, size);
1743 	return err;
1744 }
1745 
1746 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1747          .func           = bpf_perf_prog_read_value,
1748          .gpl_only       = true,
1749          .ret_type       = RET_INTEGER,
1750          .arg1_type      = ARG_PTR_TO_CTX,
1751          .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1752          .arg3_type      = ARG_CONST_SIZE,
1753 };
1754 
1755 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1756 	   void *, buf, u32, size, u64, flags)
1757 {
1758 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1759 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1760 	u32 to_copy;
1761 
1762 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1763 		return -EINVAL;
1764 
1765 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1766 		return -ENOENT;
1767 
1768 	if (unlikely(!br_stack))
1769 		return -ENOENT;
1770 
1771 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1772 		return br_stack->nr * br_entry_size;
1773 
1774 	if (!buf || (size % br_entry_size != 0))
1775 		return -EINVAL;
1776 
1777 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1778 	memcpy(buf, br_stack->entries, to_copy);
1779 
1780 	return to_copy;
1781 }
1782 
1783 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1784 	.func           = bpf_read_branch_records,
1785 	.gpl_only       = true,
1786 	.ret_type       = RET_INTEGER,
1787 	.arg1_type      = ARG_PTR_TO_CTX,
1788 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1789 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1790 	.arg4_type      = ARG_ANYTHING,
1791 };
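
/*
 * Illustrative sketch, not part of this file: how a perf_event BPF
 * program might use the helper above. The 16-entry buffer is an
 * arbitrary example size.
 *
 *	struct perf_branch_entry entries[16] = {};
 *	long sz;
 *
 *	sz = bpf_read_branch_records(ctx, NULL, 0,
 *				     BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	if (sz > 0)
 *		sz = bpf_read_branch_records(ctx, entries,
 *					     sizeof(entries), 0);
 *
 * The first call returns the total size of the branch records in
 * bytes, the second the number of bytes actually copied or a negative
 * error.
 */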
1792 
1793 static const struct bpf_func_proto *
1794 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1795 {
1796 	switch (func_id) {
1797 	case BPF_FUNC_perf_event_output:
1798 		return &bpf_perf_event_output_proto_tp;
1799 	case BPF_FUNC_get_stackid:
1800 		return &bpf_get_stackid_proto_pe;
1801 	case BPF_FUNC_get_stack:
1802 		return &bpf_get_stack_proto_pe;
1803 	case BPF_FUNC_perf_prog_read_value:
1804 		return &bpf_perf_prog_read_value_proto;
1805 	case BPF_FUNC_read_branch_records:
1806 		return &bpf_read_branch_records_proto;
1807 	case BPF_FUNC_get_attach_cookie:
1808 		return &bpf_get_attach_cookie_proto_pe;
1809 	default:
1810 		return bpf_tracing_func_proto(func_id, prog);
1811 	}
1812 }
1813 
1814 /*
1815  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1816  * to avoid potential recursive reuse issue when/if tracepoints are added
1817  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1818  *
1819  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1820  * in normal, irq, and nmi context.
1821  */
1822 struct bpf_raw_tp_regs {
1823 	struct pt_regs regs[3];
1824 };
1825 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1826 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1827 static struct pt_regs *get_bpf_raw_tp_regs(void)
1828 {
1829 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1830 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1831 
1832 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1833 		this_cpu_dec(bpf_raw_tp_nest_level);
1834 		return ERR_PTR(-EBUSY);
1835 	}
1836 
1837 	return &tp_regs->regs[nest_level - 1];
1838 }
1839 
1840 static void put_bpf_raw_tp_regs(void)
1841 {
1842 	this_cpu_dec(bpf_raw_tp_nest_level);
1843 }
1844 
1845 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1846 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1847 {
1848 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1849 	int ret;
1850 
1851 	if (IS_ERR(regs))
1852 		return PTR_ERR(regs);
1853 
1854 	perf_fetch_caller_regs(regs);
1855 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1856 
1857 	put_bpf_raw_tp_regs();
1858 	return ret;
1859 }
1860 
1861 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1862 	.func		= bpf_perf_event_output_raw_tp,
1863 	.gpl_only	= true,
1864 	.ret_type	= RET_INTEGER,
1865 	.arg1_type	= ARG_PTR_TO_CTX,
1866 	.arg2_type	= ARG_CONST_MAP_PTR,
1867 	.arg3_type	= ARG_ANYTHING,
1868 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1869 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1870 };
1871 
1872 extern const struct bpf_func_proto bpf_skb_output_proto;
1873 extern const struct bpf_func_proto bpf_xdp_output_proto;
1874 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1875 
1876 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1877 	   struct bpf_map *, map, u64, flags)
1878 {
1879 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1880 	int ret;
1881 
1882 	if (IS_ERR(regs))
1883 		return PTR_ERR(regs);
1884 
1885 	perf_fetch_caller_regs(regs);
1886 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1887 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1888 			      flags, 0, 0);
1889 	put_bpf_raw_tp_regs();
1890 	return ret;
1891 }
1892 
1893 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1894 	.func		= bpf_get_stackid_raw_tp,
1895 	.gpl_only	= true,
1896 	.ret_type	= RET_INTEGER,
1897 	.arg1_type	= ARG_PTR_TO_CTX,
1898 	.arg2_type	= ARG_CONST_MAP_PTR,
1899 	.arg3_type	= ARG_ANYTHING,
1900 };
1901 
1902 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1903 	   void *, buf, u32, size, u64, flags)
1904 {
1905 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1906 	int ret;
1907 
1908 	if (IS_ERR(regs))
1909 		return PTR_ERR(regs);
1910 
1911 	perf_fetch_caller_regs(regs);
1912 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1913 			    (unsigned long) size, flags, 0);
1914 	put_bpf_raw_tp_regs();
1915 	return ret;
1916 }
1917 
1918 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1919 	.func		= bpf_get_stack_raw_tp,
1920 	.gpl_only	= true,
1921 	.ret_type	= RET_INTEGER,
1922 	.arg1_type	= ARG_PTR_TO_CTX,
1923 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1924 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1925 	.arg4_type	= ARG_ANYTHING,
1926 };
1927 
1928 static const struct bpf_func_proto *
1929 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1930 {
1931 	switch (func_id) {
1932 	case BPF_FUNC_perf_event_output:
1933 		return &bpf_perf_event_output_proto_raw_tp;
1934 	case BPF_FUNC_get_stackid:
1935 		return &bpf_get_stackid_proto_raw_tp;
1936 	case BPF_FUNC_get_stack:
1937 		return &bpf_get_stack_proto_raw_tp;
1938 	default:
1939 		return bpf_tracing_func_proto(func_id, prog);
1940 	}
1941 }
1942 
1943 const struct bpf_func_proto *
1944 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1945 {
1946 	const struct bpf_func_proto *fn;
1947 
1948 	switch (func_id) {
1949 #ifdef CONFIG_NET
1950 	case BPF_FUNC_skb_output:
1951 		return &bpf_skb_output_proto;
1952 	case BPF_FUNC_xdp_output:
1953 		return &bpf_xdp_output_proto;
1954 	case BPF_FUNC_skc_to_tcp6_sock:
1955 		return &bpf_skc_to_tcp6_sock_proto;
1956 	case BPF_FUNC_skc_to_tcp_sock:
1957 		return &bpf_skc_to_tcp_sock_proto;
1958 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1959 		return &bpf_skc_to_tcp_timewait_sock_proto;
1960 	case BPF_FUNC_skc_to_tcp_request_sock:
1961 		return &bpf_skc_to_tcp_request_sock_proto;
1962 	case BPF_FUNC_skc_to_udp6_sock:
1963 		return &bpf_skc_to_udp6_sock_proto;
1964 	case BPF_FUNC_skc_to_unix_sock:
1965 		return &bpf_skc_to_unix_sock_proto;
1966 	case BPF_FUNC_skc_to_mptcp_sock:
1967 		return &bpf_skc_to_mptcp_sock_proto;
1968 	case BPF_FUNC_sk_storage_get:
1969 		return &bpf_sk_storage_get_tracing_proto;
1970 	case BPF_FUNC_sk_storage_delete:
1971 		return &bpf_sk_storage_delete_tracing_proto;
1972 	case BPF_FUNC_sock_from_file:
1973 		return &bpf_sock_from_file_proto;
1974 	case BPF_FUNC_get_socket_cookie:
1975 		return &bpf_get_socket_ptr_cookie_proto;
1976 	case BPF_FUNC_xdp_get_buff_len:
1977 		return &bpf_xdp_get_buff_len_trace_proto;
1978 #endif
1979 	case BPF_FUNC_seq_printf:
1980 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1981 		       &bpf_seq_printf_proto :
1982 		       NULL;
1983 	case BPF_FUNC_seq_write:
1984 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1985 		       &bpf_seq_write_proto :
1986 		       NULL;
1987 	case BPF_FUNC_seq_printf_btf:
1988 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1989 		       &bpf_seq_printf_btf_proto :
1990 		       NULL;
1991 	case BPF_FUNC_d_path:
1992 		return &bpf_d_path_proto;
1993 	case BPF_FUNC_get_func_arg:
1994 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1995 	case BPF_FUNC_get_func_ret:
1996 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1997 	case BPF_FUNC_get_func_arg_cnt:
1998 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1999 	case BPF_FUNC_get_attach_cookie:
2000 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2001 	default:
2002 		fn = raw_tp_prog_func_proto(func_id, prog);
2003 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2004 			fn = bpf_iter_get_func_proto(func_id, prog);
2005 		return fn;
2006 	}
2007 }
2008 
2009 static bool raw_tp_prog_is_valid_access(int off, int size,
2010 					enum bpf_access_type type,
2011 					const struct bpf_prog *prog,
2012 					struct bpf_insn_access_aux *info)
2013 {
2014 	return bpf_tracing_ctx_access(off, size, type);
2015 }
2016 
2017 static bool tracing_prog_is_valid_access(int off, int size,
2018 					 enum bpf_access_type type,
2019 					 const struct bpf_prog *prog,
2020 					 struct bpf_insn_access_aux *info)
2021 {
2022 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2023 }
2024 
2025 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2026 				     const union bpf_attr *kattr,
2027 				     union bpf_attr __user *uattr)
2028 {
2029 	return -ENOTSUPP;
2030 }
2031 
2032 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2033 	.get_func_proto  = raw_tp_prog_func_proto,
2034 	.is_valid_access = raw_tp_prog_is_valid_access,
2035 };
2036 
2037 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2038 #ifdef CONFIG_NET
2039 	.test_run = bpf_prog_test_run_raw_tp,
2040 #endif
2041 };
2042 
2043 const struct bpf_verifier_ops tracing_verifier_ops = {
2044 	.get_func_proto  = tracing_prog_func_proto,
2045 	.is_valid_access = tracing_prog_is_valid_access,
2046 };
2047 
2048 const struct bpf_prog_ops tracing_prog_ops = {
2049 	.test_run = bpf_prog_test_run_tracing,
2050 };
2051 
2052 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2053 						 enum bpf_access_type type,
2054 						 const struct bpf_prog *prog,
2055 						 struct bpf_insn_access_aux *info)
2056 {
2057 	if (off == 0) {
2058 		if (size != sizeof(u64) || type != BPF_READ)
2059 			return false;
2060 		info->reg_type = PTR_TO_TP_BUFFER;
2061 	}
2062 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2063 }
2064 
2065 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2066 	.get_func_proto  = raw_tp_prog_func_proto,
2067 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2068 };
2069 
2070 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2071 };
2072 
2073 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2074 				    const struct bpf_prog *prog,
2075 				    struct bpf_insn_access_aux *info)
2076 {
2077 	const int size_u64 = sizeof(u64);
2078 
2079 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2080 		return false;
2081 	if (type != BPF_READ)
2082 		return false;
2083 	if (off % size != 0) {
2084 		if (sizeof(unsigned long) != 4)
2085 			return false;
2086 		if (size != 8)
2087 			return false;
2088 		if (off % size != 4)
2089 			return false;
2090 	}
2091 
2092 	switch (off) {
2093 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2094 		bpf_ctx_record_field_size(info, size_u64);
2095 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2096 			return false;
2097 		break;
2098 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2099 		bpf_ctx_record_field_size(info, size_u64);
2100 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2101 			return false;
2102 		break;
2103 	default:
2104 		if (size != sizeof(long))
2105 			return false;
2106 	}
2107 
2108 	return true;
2109 }
2110 
2111 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2112 				      const struct bpf_insn *si,
2113 				      struct bpf_insn *insn_buf,
2114 				      struct bpf_prog *prog, u32 *target_size)
2115 {
2116 	struct bpf_insn *insn = insn_buf;
2117 
2118 	switch (si->off) {
2119 	case offsetof(struct bpf_perf_event_data, sample_period):
2120 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2121 						       data), si->dst_reg, si->src_reg,
2122 				      offsetof(struct bpf_perf_event_data_kern, data));
2123 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2124 				      bpf_target_off(struct perf_sample_data, period, 8,
2125 						     target_size));
2126 		break;
2127 	case offsetof(struct bpf_perf_event_data, addr):
2128 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2129 						       data), si->dst_reg, si->src_reg,
2130 				      offsetof(struct bpf_perf_event_data_kern, data));
2131 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2132 				      bpf_target_off(struct perf_sample_data, addr, 8,
2133 						     target_size));
2134 		break;
2135 	default:
2136 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2137 						       regs), si->dst_reg, si->src_reg,
2138 				      offsetof(struct bpf_perf_event_data_kern, regs));
2139 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2140 				      si->off);
2141 		break;
2142 	}
2143 
2144 	return insn - insn_buf;
2145 }
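
/*
 * For illustration only: with the rewrite above, a program statement
 * such as
 *
 *	u64 period = ctx->sample_period;
 *
 * becomes roughly two loads against the kernel-side context,
 *
 *	data   = ((struct bpf_perf_event_data_kern *)ctx)->data;
 *	period = data->period;
 *
 * and the default case maps any other offset onto the saved pt_regs in
 * the same way.
 */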
2146 
2147 const struct bpf_verifier_ops perf_event_verifier_ops = {
2148 	.get_func_proto		= pe_prog_func_proto,
2149 	.is_valid_access	= pe_prog_is_valid_access,
2150 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2151 };
2152 
2153 const struct bpf_prog_ops perf_event_prog_ops = {
2154 };
2155 
2156 static DEFINE_MUTEX(bpf_event_mutex);
2157 
2158 #define BPF_TRACE_MAX_PROGS 64
2159 
2160 int perf_event_attach_bpf_prog(struct perf_event *event,
2161 			       struct bpf_prog *prog,
2162 			       u64 bpf_cookie)
2163 {
2164 	struct bpf_prog_array *old_array;
2165 	struct bpf_prog_array *new_array;
2166 	int ret = -EEXIST;
2167 
2168 	/*
2169 	 * Kprobe override only works if the probe is at the function entry,
2170 	 * and only if the function is on the error injection opt-in list.
2171 	 */
2172 	if (prog->kprobe_override &&
2173 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2174 	     !trace_kprobe_error_injectable(event->tp_event)))
2175 		return -EINVAL;
2176 
2177 	mutex_lock(&bpf_event_mutex);
2178 
2179 	if (event->prog)
2180 		goto unlock;
2181 
2182 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2183 	if (old_array &&
2184 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2185 		ret = -E2BIG;
2186 		goto unlock;
2187 	}
2188 
2189 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2190 	if (ret < 0)
2191 		goto unlock;
2192 
2193 	/* set the new array to event->tp_event and set event->prog */
2194 	event->prog = prog;
2195 	event->bpf_cookie = bpf_cookie;
2196 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2197 	bpf_prog_array_free_sleepable(old_array);
2198 
2199 unlock:
2200 	mutex_unlock(&bpf_event_mutex);
2201 	return ret;
2202 }
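
/*
 * Illustrative user space sketch (not part of this file) of the path
 * that reaches this function: open a tracepoint perf event and attach
 * an already loaded BPF program to it. Error handling is omitted and
 * tracepoint_id/prog_fd are assumed to exist.
 *
 *	struct perf_event_attr pattr = {
 *		.type = PERF_TYPE_TRACEPOINT,
 *		.size = sizeof(pattr),
 *		.config = tracepoint_id,
 *	};
 *	int ev_fd = syscall(__NR_perf_event_open, &pattr, -1, 0, -1, 0);
 *
 *	ioctl(ev_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *	ioctl(ev_fd, PERF_EVENT_IOC_ENABLE, 0);
 */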
2203 
2204 void perf_event_detach_bpf_prog(struct perf_event *event)
2205 {
2206 	struct bpf_prog_array *old_array;
2207 	struct bpf_prog_array *new_array;
2208 	int ret;
2209 
2210 	mutex_lock(&bpf_event_mutex);
2211 
2212 	if (!event->prog)
2213 		goto unlock;
2214 
2215 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2216 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2217 	if (ret == -ENOENT)
2218 		goto unlock;
2219 	if (ret < 0) {
2220 		bpf_prog_array_delete_safe(old_array, event->prog);
2221 	} else {
2222 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2223 		bpf_prog_array_free_sleepable(old_array);
2224 	}
2225 
2226 	bpf_prog_put(event->prog);
2227 	event->prog = NULL;
2228 
2229 unlock:
2230 	mutex_unlock(&bpf_event_mutex);
2231 }
2232 
2233 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2234 {
2235 	struct perf_event_query_bpf __user *uquery = info;
2236 	struct perf_event_query_bpf query = {};
2237 	struct bpf_prog_array *progs;
2238 	u32 *ids, prog_cnt, ids_len;
2239 	int ret;
2240 
2241 	if (!perfmon_capable())
2242 		return -EPERM;
2243 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2244 		return -EINVAL;
2245 	if (copy_from_user(&query, uquery, sizeof(query)))
2246 		return -EFAULT;
2247 
2248 	ids_len = query.ids_len;
2249 	if (ids_len > BPF_TRACE_MAX_PROGS)
2250 		return -E2BIG;
2251 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2252 	if (!ids)
2253 		return -ENOMEM;
2254 	/*
2255 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2256 	 * is required when the user only wants to check uquery->prog_cnt.
2257 	 * There is no need to check for it since the case is handled
2258 	 * gracefully in bpf_prog_array_copy_info.
2259 	 */
2260 
2261 	mutex_lock(&bpf_event_mutex);
2262 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2263 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2264 	mutex_unlock(&bpf_event_mutex);
2265 
2266 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2267 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2268 		ret = -EFAULT;
2269 
2270 	kfree(ids);
2271 	return ret;
2272 }
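
/*
 * The matching user space query (illustrative only): retrieve the ids
 * of the programs attached to the same event. The trailing ids[] array
 * is sized via ids_len; 16 is an arbitrary example.
 *
 *	int n = 16;
 *	struct perf_event_query_bpf *q;
 *
 *	q = calloc(1, sizeof(*q) + n * sizeof(__u32));
 *	q->ids_len = n;
 *	if (ioctl(ev_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
 *		printf("%u programs attached\n", q->prog_cnt);
 */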
2273 
2274 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2275 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2276 
2277 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2278 {
2279 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2280 
2281 	for (; btp < __stop__bpf_raw_tp; btp++) {
2282 		if (!strcmp(btp->tp->name, name))
2283 			return btp;
2284 	}
2285 
2286 	return bpf_get_raw_tracepoint_module(name);
2287 }
2288 
2289 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2290 {
2291 	struct module *mod;
2292 
2293 	preempt_disable();
2294 	mod = __module_address((unsigned long)btp);
2295 	module_put(mod);
2296 	preempt_enable();
2297 }
2298 
2299 static __always_inline
2300 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2301 {
2302 	cant_sleep();
2303 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2304 		bpf_prog_inc_misses_counter(prog);
2305 		goto out;
2306 	}
2307 	rcu_read_lock();
2308 	(void) bpf_prog_run(prog, args);
2309 	rcu_read_unlock();
2310 out:
2311 	this_cpu_dec(*(prog->active));
2312 }
2313 
2314 #define UNPACK(...)			__VA_ARGS__
2315 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2316 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2317 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2318 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2319 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2320 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2321 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2322 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2323 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2324 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2325 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2326 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2327 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2328 
2329 #define SARG(X)		u64 arg##X
2330 #define COPY(X)		args[X] = arg##X
2331 
2332 #define __DL_COM	(,)
2333 #define __DL_SEM	(;)
2334 
2335 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2336 
2337 #define BPF_TRACE_DEFN_x(x)						\
2338 	void bpf_trace_run##x(struct bpf_prog *prog,			\
2339 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2340 	{								\
2341 		u64 args[x];						\
2342 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2343 		__bpf_trace_run(prog, args);				\
2344 	}								\
2345 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2346 BPF_TRACE_DEFN_x(1);
2347 BPF_TRACE_DEFN_x(2);
2348 BPF_TRACE_DEFN_x(3);
2349 BPF_TRACE_DEFN_x(4);
2350 BPF_TRACE_DEFN_x(5);
2351 BPF_TRACE_DEFN_x(6);
2352 BPF_TRACE_DEFN_x(7);
2353 BPF_TRACE_DEFN_x(8);
2354 BPF_TRACE_DEFN_x(9);
2355 BPF_TRACE_DEFN_x(10);
2356 BPF_TRACE_DEFN_x(11);
2357 BPF_TRACE_DEFN_x(12);
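
/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (modulo whitespace)
 * to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0; args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * which is what the __bpf_trace_*() stubs generated in
 * include/trace/bpf_probe.h call for a 2-argument tracepoint.
 */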
2358 
2359 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2360 {
2361 	struct tracepoint *tp = btp->tp;
2362 
2363 	/*
2364 	 * check that program doesn't access arguments beyond what's
2365 	 * available in this tracepoint
2366 	 */
2367 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2368 		return -EINVAL;
2369 
2370 	if (prog->aux->max_tp_access > btp->writable_size)
2371 		return -EINVAL;
2372 
2373 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2374 						   prog);
2375 }
2376 
2377 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2378 {
2379 	return __bpf_probe_register(btp, prog);
2380 }
2381 
2382 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2383 {
2384 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2385 }
2386 
2387 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2388 			    u32 *fd_type, const char **buf,
2389 			    u64 *probe_offset, u64 *probe_addr)
2390 {
2391 	bool is_tracepoint, is_syscall_tp;
2392 	struct bpf_prog *prog;
2393 	int flags, err = 0;
2394 
2395 	prog = event->prog;
2396 	if (!prog)
2397 		return -ENOENT;
2398 
2399 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2400 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2401 		return -EOPNOTSUPP;
2402 
2403 	*prog_id = prog->aux->id;
2404 	flags = event->tp_event->flags;
2405 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2406 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2407 
2408 	if (is_tracepoint || is_syscall_tp) {
2409 		*buf = is_tracepoint ? event->tp_event->tp->name
2410 				     : event->tp_event->name;
2411 		/* We allow NULL pointer for tracepoint */
2412 		if (fd_type)
2413 			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2414 		if (probe_offset)
2415 			*probe_offset = 0x0;
2416 		if (probe_addr)
2417 			*probe_addr = 0x0;
2418 	} else {
2419 		/* kprobe/uprobe */
2420 		err = -EOPNOTSUPP;
2421 #ifdef CONFIG_KPROBE_EVENTS
2422 		if (flags & TRACE_EVENT_FL_KPROBE)
2423 			err = bpf_get_kprobe_info(event, fd_type, buf,
2424 						  probe_offset, probe_addr,
2425 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2426 #endif
2427 #ifdef CONFIG_UPROBE_EVENTS
2428 		if (flags & TRACE_EVENT_FL_UPROBE)
2429 			err = bpf_get_uprobe_info(event, fd_type, buf,
2430 						  probe_offset, probe_addr,
2431 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2432 #endif
2433 	}
2434 
2435 	return err;
2436 }
2437 
2438 static int __init send_signal_irq_work_init(void)
2439 {
2440 	int cpu;
2441 	struct send_signal_irq_work *work;
2442 
2443 	for_each_possible_cpu(cpu) {
2444 		work = per_cpu_ptr(&send_signal_work, cpu);
2445 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2446 	}
2447 	return 0;
2448 }
2449 
2450 subsys_initcall(send_signal_irq_work_init);
2451 
2452 #ifdef CONFIG_MODULES
2453 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2454 			    void *module)
2455 {
2456 	struct bpf_trace_module *btm, *tmp;
2457 	struct module *mod = module;
2458 	int ret = 0;
2459 
2460 	if (mod->num_bpf_raw_events == 0 ||
2461 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2462 		goto out;
2463 
2464 	mutex_lock(&bpf_module_mutex);
2465 
2466 	switch (op) {
2467 	case MODULE_STATE_COMING:
2468 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2469 		if (btm) {
2470 			btm->module = module;
2471 			list_add(&btm->list, &bpf_trace_modules);
2472 		} else {
2473 			ret = -ENOMEM;
2474 		}
2475 		break;
2476 	case MODULE_STATE_GOING:
2477 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2478 			if (btm->module == module) {
2479 				list_del(&btm->list);
2480 				kfree(btm);
2481 				break;
2482 			}
2483 		}
2484 		break;
2485 	}
2486 
2487 	mutex_unlock(&bpf_module_mutex);
2488 
2489 out:
2490 	return notifier_from_errno(ret);
2491 }
2492 
2493 static struct notifier_block bpf_module_nb = {
2494 	.notifier_call = bpf_event_notify,
2495 };
2496 
2497 static int __init bpf_event_init(void)
2498 {
2499 	register_module_notifier(&bpf_module_nb);
2500 	return 0;
2501 }
2502 
2503 fs_initcall(bpf_event_init);
2504 #endif /* CONFIG_MODULES */
2505 
2506 #ifdef CONFIG_FPROBE
2507 struct bpf_kprobe_multi_link {
2508 	struct bpf_link link;
2509 	struct fprobe fp;
2510 	unsigned long *addrs;
2511 	u64 *cookies;
2512 	u32 cnt;
2513 	u32 mods_cnt;
2514 	struct module **mods;
2515 	u32 flags;
2516 };
2517 
2518 struct bpf_kprobe_multi_run_ctx {
2519 	struct bpf_run_ctx run_ctx;
2520 	struct bpf_kprobe_multi_link *link;
2521 	unsigned long entry_ip;
2522 };
2523 
2524 struct user_syms {
2525 	const char **syms;
2526 	char *buf;
2527 };
2528 
2529 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2530 {
2531 	unsigned long __user usymbol;
2532 	const char **syms = NULL;
2533 	char *buf = NULL, *p;
2534 	int err = -ENOMEM;
2535 	unsigned int i;
2536 
2537 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2538 	if (!syms)
2539 		goto error;
2540 
2541 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2542 	if (!buf)
2543 		goto error;
2544 
2545 	for (p = buf, i = 0; i < cnt; i++) {
2546 		if (__get_user(usymbol, usyms + i)) {
2547 			err = -EFAULT;
2548 			goto error;
2549 		}
2550 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2551 		if (err == KSYM_NAME_LEN)
2552 			err = -E2BIG;
2553 		if (err < 0)
2554 			goto error;
2555 		syms[i] = p;
2556 		p += err + 1;
2557 	}
2558 
2559 	us->syms = syms;
2560 	us->buf = buf;
2561 	return 0;
2562 
2563 error:
2564 	if (err) {
2565 		kvfree(syms);
2566 		kvfree(buf);
2567 	}
2568 	return err;
2569 }
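
/*
 * Resulting layout, illustrated with made-up symbol names: buf holds
 * the NUL-terminated strings back to back and syms[] points into it,
 *
 *	buf:     "vfs_read\0vfs_write\0"
 *	syms[0]: &buf[0]
 *	syms[1]: &buf[9]
 *
 * so free_user_syms() only has to kvfree() the two allocations.
 */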
2570 
2571 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2572 {
2573 	u32 i;
2574 
2575 	for (i = 0; i < cnt; i++)
2576 		module_put(mods[i]);
2577 }
2578 
2579 static void free_user_syms(struct user_syms *us)
2580 {
2581 	kvfree(us->syms);
2582 	kvfree(us->buf);
2583 }
2584 
2585 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2586 {
2587 	struct bpf_kprobe_multi_link *kmulti_link;
2588 
2589 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2590 	unregister_fprobe(&kmulti_link->fp);
2591 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2592 }
2593 
2594 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2595 {
2596 	struct bpf_kprobe_multi_link *kmulti_link;
2597 
2598 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2599 	kvfree(kmulti_link->addrs);
2600 	kvfree(kmulti_link->cookies);
2601 	kfree(kmulti_link->mods);
2602 	kfree(kmulti_link);
2603 }
2604 
2605 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2606 						struct bpf_link_info *info)
2607 {
2608 	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2609 	struct bpf_kprobe_multi_link *kmulti_link;
2610 	u32 ucount = info->kprobe_multi.count;
2611 	int err = 0, i;
2612 
2613 	if (!uaddrs ^ !ucount)
2614 		return -EINVAL;
2615 
2616 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2617 	info->kprobe_multi.count = kmulti_link->cnt;
2618 	info->kprobe_multi.flags = kmulti_link->flags;
2619 
2620 	if (!uaddrs)
2621 		return 0;
2622 	if (ucount < kmulti_link->cnt)
2623 		err = -ENOSPC;
2624 	else
2625 		ucount = kmulti_link->cnt;
2626 
2627 	if (kallsyms_show_value(current_cred())) {
2628 		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2629 			return -EFAULT;
2630 	} else {
2631 		for (i = 0; i < ucount; i++) {
2632 			if (put_user(0, uaddrs + i))
2633 				return -EFAULT;
2634 		}
2635 	}
2636 	return err;
2637 }
2638 
2639 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2640 	.release = bpf_kprobe_multi_link_release,
2641 	.dealloc = bpf_kprobe_multi_link_dealloc,
2642 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2643 };
2644 
2645 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2646 {
2647 	const struct bpf_kprobe_multi_link *link = priv;
2648 	unsigned long *addr_a = a, *addr_b = b;
2649 	u64 *cookie_a, *cookie_b;
2650 
2651 	cookie_a = link->cookies + (addr_a - link->addrs);
2652 	cookie_b = link->cookies + (addr_b - link->addrs);
2653 
2654 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2655 	swap(*addr_a, *addr_b);
2656 	swap(*cookie_a, *cookie_b);
2657 }
2658 
2659 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2660 {
2661 	const unsigned long *addr_a = a, *addr_b = b;
2662 
2663 	if (*addr_a == *addr_b)
2664 		return 0;
2665 	return *addr_a < *addr_b ? -1 : 1;
2666 }
2667 
2668 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2669 {
2670 	return bpf_kprobe_multi_addrs_cmp(a, b);
2671 }
2672 
2673 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2674 {
2675 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2676 	struct bpf_kprobe_multi_link *link;
2677 	u64 *cookie, entry_ip;
2678 	unsigned long *addr;
2679 
2680 	if (WARN_ON_ONCE(!ctx))
2681 		return 0;
2682 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2683 	link = run_ctx->link;
2684 	if (!link->cookies)
2685 		return 0;
2686 	entry_ip = run_ctx->entry_ip;
2687 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2688 		       bpf_kprobe_multi_addrs_cmp);
2689 	if (!addr)
2690 		return 0;
2691 	cookie = link->cookies + (addr - link->addrs);
2692 	return *cookie;
2693 }
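
/*
 * Illustrative invariant behind the bsearch() above (addresses are
 * made up): after attach, link->addrs is sorted and link->cookies is
 * kept index-aligned with it,
 *
 *	addrs[]   = { 0xffff0100, 0xffff0250, 0xffff0a00 }
 *	cookies[] = {       0x11,       0x22,       0x33 }
 *
 * so the slot found for entry_ip directly yields the cookie that was
 * supplied for that probed function.
 */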
2694 
2695 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2696 {
2697 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2698 
2699 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2700 	return run_ctx->entry_ip;
2701 }
2702 
2703 static int
2704 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2705 			   unsigned long entry_ip, struct pt_regs *regs)
2706 {
2707 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2708 		.link = link,
2709 		.entry_ip = entry_ip,
2710 	};
2711 	struct bpf_run_ctx *old_run_ctx;
2712 	int err;
2713 
2714 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2715 		err = 0;
2716 		goto out;
2717 	}
2718 
2719 	migrate_disable();
2720 	rcu_read_lock();
2721 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2722 	err = bpf_prog_run(link->link.prog, regs);
2723 	bpf_reset_run_ctx(old_run_ctx);
2724 	rcu_read_unlock();
2725 	migrate_enable();
2726 
2727  out:
2728 	__this_cpu_dec(bpf_prog_active);
2729 	return err;
2730 }
2731 
2732 static int
2733 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2734 			  unsigned long ret_ip, struct pt_regs *regs,
2735 			  void *data)
2736 {
2737 	struct bpf_kprobe_multi_link *link;
2738 
2739 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2740 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2741 	return 0;
2742 }
2743 
2744 static void
2745 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2746 			       unsigned long ret_ip, struct pt_regs *regs,
2747 			       void *data)
2748 {
2749 	struct bpf_kprobe_multi_link *link;
2750 
2751 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2752 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2753 }
2754 
2755 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2756 {
2757 	const char **str_a = (const char **) a;
2758 	const char **str_b = (const char **) b;
2759 
2760 	return strcmp(*str_a, *str_b);
2761 }
2762 
2763 struct multi_symbols_sort {
2764 	const char **funcs;
2765 	u64 *cookies;
2766 };
2767 
2768 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2769 {
2770 	const struct multi_symbols_sort *data = priv;
2771 	const char **name_a = a, **name_b = b;
2772 
2773 	swap(*name_a, *name_b);
2774 
2775 	/* If defined, swap also related cookies. */
2776 	/* If defined, also swap the related cookies. */
2777 		u64 *cookie_a, *cookie_b;
2778 
2779 		cookie_a = data->cookies + (name_a - data->funcs);
2780 		cookie_b = data->cookies + (name_b - data->funcs);
2781 		swap(*cookie_a, *cookie_b);
2782 	}
2783 }
2784 
2785 struct modules_array {
2786 	struct module **mods;
2787 	int mods_cnt;
2788 	int mods_cap;
2789 };
2790 
2791 static int add_module(struct modules_array *arr, struct module *mod)
2792 {
2793 	struct module **mods;
2794 
2795 	if (arr->mods_cnt == arr->mods_cap) {
2796 		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2797 		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2798 		if (!mods)
2799 			return -ENOMEM;
2800 		arr->mods = mods;
2801 	}
2802 
2803 	arr->mods[arr->mods_cnt] = mod;
2804 	arr->mods_cnt++;
2805 	return 0;
2806 }
2807 
2808 static bool has_module(struct modules_array *arr, struct module *mod)
2809 {
2810 	int i;
2811 
2812 	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2813 		if (arr->mods[i] == mod)
2814 			return true;
2815 	}
2816 	return false;
2817 }
2818 
2819 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2820 {
2821 	struct modules_array arr = {};
2822 	u32 i, err = 0;
2823 	u32 i;
	int err = 0;
2824 	for (i = 0; i < addrs_cnt; i++) {
2825 		struct module *mod;
2826 
2827 		preempt_disable();
2828 		mod = __module_address(addrs[i]);
2829 		/* Either no module or we it's already stored  */
2830 		/* Either no module or it's already stored */
2831 			preempt_enable();
2832 			continue;
2833 		}
2834 		if (!try_module_get(mod))
2835 			err = -EINVAL;
2836 		preempt_enable();
2837 		if (err)
2838 			break;
2839 		err = add_module(&arr, mod);
2840 		if (err) {
2841 			module_put(mod);
2842 			break;
2843 		}
2844 	}
2845 
2846 	/* We return either err < 0 in case of error, ... */
2847 	if (err) {
2848 		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2849 		kfree(arr.mods);
2850 		return err;
2851 	}
2852 
2853 	/* or number of modules found if everything is ok. */
2854 	*mods = arr.mods;
2855 	return arr.mods_cnt;
2856 }
2857 
2858 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2859 {
2860 	u32 i;
2861 
2862 	for (i = 0; i < cnt; i++) {
2863 		if (!within_error_injection_list(addrs[i]))
2864 			return -EINVAL;
2865 	}
2866 	return 0;
2867 }
2868 
2869 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2870 {
2871 	struct bpf_kprobe_multi_link *link = NULL;
2872 	struct bpf_link_primer link_primer;
2873 	void __user *ucookies;
2874 	unsigned long *addrs;
2875 	u32 flags, cnt, size;
2876 	void __user *uaddrs;
2877 	u64 *cookies = NULL;
2878 	void __user *usyms;
2879 	int err;
2880 
2881 	/* no support for 32bit archs yet */
2882 	if (sizeof(u64) != sizeof(void *))
2883 		return -EOPNOTSUPP;
2884 
2885 	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2886 		return -EINVAL;
2887 
2888 	flags = attr->link_create.kprobe_multi.flags;
2889 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2890 		return -EINVAL;
2891 
2892 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2893 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2894 	if (!!uaddrs == !!usyms)
2895 		return -EINVAL;
2896 
2897 	cnt = attr->link_create.kprobe_multi.cnt;
2898 	if (!cnt)
2899 		return -EINVAL;
2900 
2901 	size = cnt * sizeof(*addrs);
2902 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2903 	if (!addrs)
2904 		return -ENOMEM;
2905 
2906 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2907 	if (ucookies) {
2908 		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
2909 		if (!cookies) {
2910 			err = -ENOMEM;
2911 			goto error;
2912 		}
2913 		if (copy_from_user(cookies, ucookies, size)) {
2914 			err = -EFAULT;
2915 			goto error;
2916 		}
2917 	}
2918 
2919 	if (uaddrs) {
2920 		if (copy_from_user(addrs, uaddrs, size)) {
2921 			err = -EFAULT;
2922 			goto error;
2923 		}
2924 	} else {
2925 		struct multi_symbols_sort data = {
2926 			.cookies = cookies,
2927 		};
2928 		struct user_syms us;
2929 
2930 		err = copy_user_syms(&us, usyms, cnt);
2931 		if (err)
2932 			goto error;
2933 
2934 		if (cookies)
2935 			data.funcs = us.syms;
2936 
2937 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2938 		       symbols_swap_r, &data);
2939 
2940 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2941 		free_user_syms(&us);
2942 		if (err)
2943 			goto error;
2944 	}
2945 
2946 	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2947 		err = -EINVAL;
2948 		goto error;
2949 	}
2950 
2951 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2952 	if (!link) {
2953 		err = -ENOMEM;
2954 		goto error;
2955 	}
2956 
2957 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2958 		      &bpf_kprobe_multi_link_lops, prog);
2959 
2960 	err = bpf_link_prime(&link->link, &link_primer);
2961 	if (err)
2962 		goto error;
2963 
2964 	if (flags & BPF_F_KPROBE_MULTI_RETURN)
2965 		link->fp.exit_handler = kprobe_multi_link_exit_handler;
2966 	else
2967 		link->fp.entry_handler = kprobe_multi_link_handler;
2968 
2969 	link->addrs = addrs;
2970 	link->cookies = cookies;
2971 	link->cnt = cnt;
2972 	link->flags = flags;
2973 
2974 	if (cookies) {
2975 		/*
2976 		 * Sorting the addresses will sort the cookies as well
2977 		 * (see bpf_kprobe_multi_cookie_swap), so the
2978 		 * bpf_get_attach_cookie helper can later look up a
2979 		 * cookie based on the probed address.
2980 		 */
2981 		sort_r(addrs, cnt, sizeof(*addrs),
2982 		       bpf_kprobe_multi_cookie_cmp,
2983 		       bpf_kprobe_multi_cookie_swap,
2984 		       link);
2985 	}
2986 
2987 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
2988 	if (err < 0) {
2989 		bpf_link_cleanup(&link_primer);
2990 		return err;
2991 	}
2992 	link->mods_cnt = err;
2993 
2994 	err = register_fprobe_ips(&link->fp, addrs, cnt);
2995 	if (err) {
2996 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
2997 		bpf_link_cleanup(&link_primer);
2998 		return err;
2999 	}
3000 
3001 	return bpf_link_settle(&link_primer);
3002 
3003 error:
3004 	kfree(link);
3005 	kvfree(addrs);
3006 	kvfree(cookies);
3007 	return err;
3008 }
3009 #else /* !CONFIG_FPROBE */
3010 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3011 {
3012 	return -EOPNOTSUPP;
3013 }
3014 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3015 {
3016 	return 0;
3017 }
3018 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3019 {
3020 	return 0;
3021 }
3022 #endif
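
/*
 * Illustrative user space sketch (not part of this file) of creating a
 * kprobe multi link handled by bpf_kprobe_multi_link_attach(). The
 * symbol names are made up; libbpf's
 * bpf_program__attach_kprobe_multi_opts() normally builds this
 * attribute for you.
 *
 *	const char *syms[] = { "ksys_read", "ksys_write" };
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
 *	attr.link_create.kprobe_multi.cnt = 2;
 *	attr.link_create.kprobe_multi.syms = (__u64)(unsigned long)syms;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */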
3023 
3024 #ifdef CONFIG_UPROBES
3025 struct bpf_uprobe_multi_link;
3026 
3027 struct bpf_uprobe {
3028 	struct bpf_uprobe_multi_link *link;
3029 	loff_t offset;
3030 	u64 cookie;
3031 	struct uprobe_consumer consumer;
3032 };
3033 
3034 struct bpf_uprobe_multi_link {
3035 	struct path path;
3036 	struct bpf_link link;
3037 	u32 cnt;
3038 	struct bpf_uprobe *uprobes;
3039 	struct task_struct *task;
3040 };
3041 
3042 struct bpf_uprobe_multi_run_ctx {
3043 	struct bpf_run_ctx run_ctx;
3044 	unsigned long entry_ip;
3045 	struct bpf_uprobe *uprobe;
3046 };
3047 
3048 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3049 				  u32 cnt)
3050 {
3051 	u32 i;
3052 
3053 	for (i = 0; i < cnt; i++) {
3054 		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3055 				  &uprobes[i].consumer);
3056 	}
3057 }
3058 
3059 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3060 {
3061 	struct bpf_uprobe_multi_link *umulti_link;
3062 
3063 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3064 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3065 }
3066 
3067 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3068 {
3069 	struct bpf_uprobe_multi_link *umulti_link;
3070 
3071 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3072 	if (umulti_link->task)
3073 		put_task_struct(umulti_link->task);
3074 	path_put(&umulti_link->path);
3075 	kvfree(umulti_link->uprobes);
3076 	kfree(umulti_link);
3077 }
3078 
3079 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3080 	.release = bpf_uprobe_multi_link_release,
3081 	.dealloc = bpf_uprobe_multi_link_dealloc,
3082 };
3083 
3084 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3085 			   unsigned long entry_ip,
3086 			   struct pt_regs *regs)
3087 {
3088 	struct bpf_uprobe_multi_link *link = uprobe->link;
3089 	struct bpf_uprobe_multi_run_ctx run_ctx = {
3090 		.entry_ip = entry_ip,
3091 		.uprobe = uprobe,
3092 	};
3093 	struct bpf_prog *prog = link->link.prog;
3094 	bool sleepable = prog->aux->sleepable;
3095 	struct bpf_run_ctx *old_run_ctx;
3096 	int err = 0;
3097 
3098 	if (link->task && current != link->task)
3099 		return 0;
3100 
3101 	if (sleepable)
3102 		rcu_read_lock_trace();
3103 	else
3104 		rcu_read_lock();
3105 
3106 	migrate_disable();
3107 
3108 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3109 	err = bpf_prog_run(link->link.prog, regs);
3110 	bpf_reset_run_ctx(old_run_ctx);
3111 
3112 	migrate_enable();
3113 
3114 	if (sleepable)
3115 		rcu_read_unlock_trace();
3116 	else
3117 		rcu_read_unlock();
3118 	return err;
3119 }
3120 
3121 static bool
3122 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3123 			 struct mm_struct *mm)
3124 {
3125 	struct bpf_uprobe *uprobe;
3126 
3127 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3128 	return uprobe->link->task->mm == mm;
3129 }
3130 
3131 static int
3132 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3133 {
3134 	struct bpf_uprobe *uprobe;
3135 
3136 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3137 	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3138 }
3139 
3140 static int
3141 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3142 {
3143 	struct bpf_uprobe *uprobe;
3144 
3145 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3146 	return uprobe_prog_run(uprobe, func, regs);
3147 }
3148 
3149 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3150 {
3151 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3152 
3153 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3154 	return run_ctx->entry_ip;
3155 }
3156 
3157 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3158 {
3159 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3160 
3161 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3162 	return run_ctx->uprobe->cookie;
3163 }
3164 
3165 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3166 {
3167 	struct bpf_uprobe_multi_link *link = NULL;
3168 	unsigned long __user *uref_ctr_offsets;
3169 	unsigned long *ref_ctr_offsets = NULL;
3170 	struct bpf_link_primer link_primer;
3171 	struct bpf_uprobe *uprobes = NULL;
3172 	struct task_struct *task = NULL;
3173 	unsigned long __user *uoffsets;
3174 	u64 __user *ucookies;
3175 	void __user *upath;
3176 	u32 flags, cnt, i;
3177 	struct path path;
3178 	char *name;
3179 	pid_t pid;
3180 	int err;
3181 
3182 	/* no support for 32bit archs yet */
3183 	if (sizeof(u64) != sizeof(void *))
3184 		return -EOPNOTSUPP;
3185 
3186 	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3187 		return -EINVAL;
3188 
3189 	flags = attr->link_create.uprobe_multi.flags;
3190 	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3191 		return -EINVAL;
3192 
3193 	/*
3194 	 * path, offsets and cnt are mandatory,
3195 	 * ref_ctr_offsets and cookies are optional
3196 	 */
3197 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3198 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3199 	cnt = attr->link_create.uprobe_multi.cnt;
3200 
3201 	if (!upath || !uoffsets || !cnt)
3202 		return -EINVAL;
3203 	if (cnt > MAX_UPROBE_MULTI_CNT)
3204 		return -E2BIG;
3205 
3206 	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3207 	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3208 
3209 	name = strndup_user(upath, PATH_MAX);
3210 	if (IS_ERR(name)) {
3211 		err = PTR_ERR(name);
3212 		return err;
3213 	}
3214 
3215 	err = kern_path(name, LOOKUP_FOLLOW, &path);
3216 	kfree(name);
3217 	if (err)
3218 		return err;
3219 
3220 	if (!d_is_reg(path.dentry)) {
3221 		err = -EBADF;
3222 		goto error_path_put;
3223 	}
3224 
3225 	pid = attr->link_create.uprobe_multi.pid;
3226 	if (pid) {
3227 		rcu_read_lock();
3228 		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3229 		rcu_read_unlock();
3230 		if (!task) {
3231 			err = -ESRCH;
3232 			goto error_path_put;
3233 		}
3234 	}
3235 
3236 	err = -ENOMEM;
3237 
3238 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3239 	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3240 
3241 	if (!uprobes || !link)
3242 		goto error_free;
3243 
3244 	if (uref_ctr_offsets) {
3245 		ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
3246 		if (!ref_ctr_offsets)
3247 			goto error_free;
3248 	}
3249 
3250 	for (i = 0; i < cnt; i++) {
3251 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3252 			err = -EFAULT;
3253 			goto error_free;
3254 		}
3255 		if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
3256 			err = -EFAULT;
3257 			goto error_free;
3258 		}
3259 		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3260 			err = -EFAULT;
3261 			goto error_free;
3262 		}
3263 
3264 		uprobes[i].link = link;
3265 
3266 		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3267 			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3268 		else
3269 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3270 
3271 		if (pid)
3272 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3273 	}
3274 
3275 	link->cnt = cnt;
3276 	link->uprobes = uprobes;
3277 	link->path = path;
3278 	link->task = task;
3279 
3280 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3281 		      &bpf_uprobe_multi_link_lops, prog);
3282 
3283 	for (i = 0; i < cnt; i++) {
3284 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3285 					     uprobes[i].offset,
3286 					     ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
3287 					     &uprobes[i].consumer);
3288 		if (err) {
3289 			bpf_uprobe_unregister(&path, uprobes, i);
3290 			goto error_free;
3291 		}
3292 	}
3293 
3294 	err = bpf_link_prime(&link->link, &link_primer);
3295 	if (err)
3296 		goto error_free;
3297 
3298 	kvfree(ref_ctr_offsets);
3299 	return bpf_link_settle(&link_primer);
3300 
3301 error_free:
3302 	kvfree(ref_ctr_offsets);
3303 	kvfree(uprobes);
3304 	kfree(link);
3305 	if (task)
3306 		put_task_struct(task);
3307 error_path_put:
3308 	path_put(&path);
3309 	return err;
3310 }
3311 #else /* !CONFIG_UPROBES */
3312 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3313 {
3314 	return -EOPNOTSUPP;
3315 }
3316 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3317 {
3318 	return 0;
3319 }
3320 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3321 {
3322 	return 0;
3323 }
3324 #endif /* CONFIG_UPROBES */
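
/*
 * And the uprobe multi counterpart (illustrative only), handled by
 * bpf_uprobe_multi_link_attach() above. The binary path and offsets
 * are made up; the offsets are file offsets of the probed
 * instructions.
 *
 *	unsigned long offs[] = { 0x1234, 0x5678 };
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
 *	attr.link_create.uprobe_multi.path =
 *		(__u64)(unsigned long)"/usr/lib/libc.so.6";
 *	attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offs;
 *	attr.link_create.uprobe_multi.cnt = 2;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */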
3325