xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 5ffd8c73)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 #include <linux/namei.h>
27 
28 #include <net/bpf_sk_storage.h>
29 
30 #include <uapi/linux/bpf.h>
31 #include <uapi/linux/btf.h>
32 
33 #include <asm/tlb.h>
34 
35 #include "trace_probe.h"
36 #include "trace.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include "bpf_trace.h"
40 
41 #define bpf_event_rcu_dereference(p)					\
42 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
43 
44 #ifdef CONFIG_MODULES
45 struct bpf_trace_module {
46 	struct module *module;
47 	struct list_head list;
48 };
49 
50 static LIST_HEAD(bpf_trace_modules);
51 static DEFINE_MUTEX(bpf_module_mutex);
52 
53 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
54 {
55 	struct bpf_raw_event_map *btp, *ret = NULL;
56 	struct bpf_trace_module *btm;
57 	unsigned int i;
58 
59 	mutex_lock(&bpf_module_mutex);
60 	list_for_each_entry(btm, &bpf_trace_modules, list) {
61 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
62 			btp = &btm->module->bpf_raw_events[i];
63 			if (!strcmp(btp->tp->name, name)) {
64 				if (try_module_get(btm->module))
65 					ret = btp;
66 				goto out;
67 			}
68 		}
69 	}
70 out:
71 	mutex_unlock(&bpf_module_mutex);
72 	return ret;
73 }
74 #else
75 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
76 {
77 	return NULL;
78 }
79 #endif /* CONFIG_MODULES */
80 
81 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
82 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
83 
84 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
85 				  u64 flags, const struct btf **btf,
86 				  s32 *btf_id);
87 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
88 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
89 
90 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
91 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
92 
93 /**
94  * trace_call_bpf - invoke BPF program
95  * @call: tracepoint event
96  * @ctx: opaque context pointer
97  *
98  * kprobe handlers execute BPF programs via this helper.
99  * It can also be used from static tracepoints in the future.
100  *
101  * Return: BPF programs always return an integer, which the kprobe handler
102  * interprets as:
103  * 0 - return from kprobe (event is filtered out)
104  * 1 - store kprobe event into ring buffer
105  * Other values are reserved and currently alias to 1
106  */
107 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
108 {
109 	unsigned int ret;
110 
111 	cant_sleep();
112 
113 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
114 		/*
115 		 * since some bpf program is already running on this cpu,
116 		 * don't call into another bpf program (same or different)
117 		 * and don't send kprobe event into ring-buffer,
118 		 * so return zero here
119 		 */
120 		ret = 0;
121 		goto out;
122 	}
123 
124 	/*
125 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
126 	 * to all call sites, we do a bpf_prog_array_valid() check there to
127 	 * see whether call->prog_array is empty or not, which is a
128 	 * heuristic to speed up execution.
129 	 *
130 	 * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
131 	 * we enter trace_call_bpf() and do the actual, proper
132 	 * rcu_dereference() under the RCU read lock. If that dereference
133 	 * turns out to be NULL, we bail out.
134 	 * Conversely, if the pointer fetched by bpf_prog_array_valid() was
135 	 * NULL, the prog_array is skipped, at the accepted risk of missing
136 	 * events if it was updated between that check and the
137 	 * rcu_dereference().
138 	 */
139 	rcu_read_lock();
140 	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
141 				 ctx, bpf_prog_run);
142 	rcu_read_unlock();
143 
144  out:
145 	__this_cpu_dec(bpf_prog_active);
146 
147 	return ret;
148 }
149 
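/*
 * Illustrative sketch (not part of this file): a probe handler typically
 * consumes the return value documented above roughly like this; compare
 * kprobe_perf_func() in kernel/trace/trace_kprobe.c, which follows a
 * similar pattern. The function name below is hypothetical.
 *
 *	static void hypothetical_perf_func(struct trace_event_call *call,
 *					   struct pt_regs *regs)
 *	{
 *		// 0 means the BPF program filtered this event out
 *		if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
 *			return;
 *		// ... otherwise store the kprobe event into the ring buffer ...
 *	}
 */
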
150 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
151 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
152 {
153 	regs_set_return_value(regs, rc);
154 	override_function_with_return(regs);
155 	return 0;
156 }
157 
158 static const struct bpf_func_proto bpf_override_return_proto = {
159 	.func		= bpf_override_return,
160 	.gpl_only	= true,
161 	.ret_type	= RET_INTEGER,
162 	.arg1_type	= ARG_PTR_TO_CTX,
163 	.arg2_type	= ARG_ANYTHING,
164 };
165 #endif
166 
167 static __always_inline int
168 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
169 {
170 	int ret;
171 
172 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
173 	if (unlikely(ret < 0))
174 		memset(dst, 0, size);
175 	return ret;
176 }
177 
178 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
179 	   const void __user *, unsafe_ptr)
180 {
181 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
182 }
183 
184 const struct bpf_func_proto bpf_probe_read_user_proto = {
185 	.func		= bpf_probe_read_user,
186 	.gpl_only	= true,
187 	.ret_type	= RET_INTEGER,
188 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
189 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
190 	.arg3_type	= ARG_ANYTHING,
191 };
192 
193 static __always_inline int
194 bpf_probe_read_user_str_common(void *dst, u32 size,
195 			       const void __user *unsafe_ptr)
196 {
197 	int ret;
198 
199 	/*
200 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
201 	 * terminator into `dst`.
202 	 *
203 	 * strncpy_from_user() does long-sized strides in the fast path. If the
204 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
205 	 * then there could be junk after the NUL in `dst`. If the user takes `dst`
206 	 * and keys a hash map with it, then semantically identical strings can
207 	 * occupy multiple entries in the map.
208 	 */
209 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
210 	if (unlikely(ret < 0))
211 		memset(dst, 0, size);
212 	return ret;
213 }
214 
215 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
216 	   const void __user *, unsafe_ptr)
217 {
218 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
219 }
220 
221 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
222 	.func		= bpf_probe_read_user_str,
223 	.gpl_only	= true,
224 	.ret_type	= RET_INTEGER,
225 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
226 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
227 	.arg3_type	= ARG_ANYTHING,
228 };
229 
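/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_probe_read_user_str(), written in libbpf style against
 * bpf_helpers.h/bpf_tracing.h. The attach point, argument index and
 * names are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int sketch_read_user_str(struct pt_regs *ctx)
 *	{
 *		const char *upath = (const char *)PT_REGS_PARM2(ctx);
 *		char path[64];
 *		long len;
 *
 *		// Copies at most sizeof(path) bytes and NUL-terminates;
 *		// on error, path[] is zeroed as implemented above.
 *		len = bpf_probe_read_user_str(path, sizeof(path), upath);
 *		if (len > 0)
 *			bpf_printk("openat2 path: %s", path);
 *		return 0;
 *	}
 */
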
230 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
231 	   const void *, unsafe_ptr)
232 {
233 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
234 }
235 
236 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
237 	.func		= bpf_probe_read_kernel,
238 	.gpl_only	= true,
239 	.ret_type	= RET_INTEGER,
240 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
241 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
242 	.arg3_type	= ARG_ANYTHING,
243 };
244 
245 static __always_inline int
246 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
247 {
248 	int ret;
249 
250 	/*
251 	 * The strncpy_from_kernel_nofault() call will likely not fill the
252 	 * entire buffer, but that's okay in this circumstance as we're probing
253 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
254 	 * as well probe the stack. Thus, memory is explicitly cleared
255 	 * only in the error case, so that improper users ignoring the return
256 	 * code altogether don't copy garbage; otherwise the length of the
257 	 * string is returned, which can be used for bpf_perf_event_output() et al.
258 	 */
259 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
260 	if (unlikely(ret < 0))
261 		memset(dst, 0, size);
262 	return ret;
263 }
264 
265 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
266 	   const void *, unsafe_ptr)
267 {
268 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
269 }
270 
271 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
272 	.func		= bpf_probe_read_kernel_str,
273 	.gpl_only	= true,
274 	.ret_type	= RET_INTEGER,
275 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
276 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
277 	.arg3_type	= ARG_ANYTHING,
278 };
279 
280 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
281 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
282 	   const void *, unsafe_ptr)
283 {
284 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
285 		return bpf_probe_read_user_common(dst, size,
286 				(__force void __user *)unsafe_ptr);
287 	}
288 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
289 }
290 
291 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
292 	.func		= bpf_probe_read_compat,
293 	.gpl_only	= true,
294 	.ret_type	= RET_INTEGER,
295 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
296 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
297 	.arg3_type	= ARG_ANYTHING,
298 };
299 
300 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
301 	   const void *, unsafe_ptr)
302 {
303 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
304 		return bpf_probe_read_user_str_common(dst, size,
305 				(__force void __user *)unsafe_ptr);
306 	}
307 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
308 }
309 
310 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
311 	.func		= bpf_probe_read_compat_str,
312 	.gpl_only	= true,
313 	.ret_type	= RET_INTEGER,
314 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
315 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
316 	.arg3_type	= ARG_ANYTHING,
317 };
318 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
319 
320 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
321 	   u32, size)
322 {
323 	/*
324 	 * Ensure we're in user context which is safe for the helper to
325 	 * run. This helper has no business in a kthread.
326 	 *
327 	 * access_ok() should prevent writing to non-user memory, but in
328 	 * some situations (nommu, temporary switch, etc) access_ok() does
329 	 * not provide enough validation, hence the check on KERNEL_DS.
330 	 *
331 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
332 	 * state, when the task or mm are switched. This is specifically
333 	 * required to prevent the use of temporary mm.
334 	 */
335 
336 	if (unlikely(in_interrupt() ||
337 		     current->flags & (PF_KTHREAD | PF_EXITING)))
338 		return -EPERM;
339 	if (unlikely(!nmi_uaccess_okay()))
340 		return -EPERM;
341 
342 	return copy_to_user_nofault(unsafe_ptr, src, size);
343 }
344 
345 static const struct bpf_func_proto bpf_probe_write_user_proto = {
346 	.func		= bpf_probe_write_user,
347 	.gpl_only	= true,
348 	.ret_type	= RET_INTEGER,
349 	.arg1_type	= ARG_ANYTHING,
350 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
351 	.arg3_type	= ARG_CONST_SIZE,
352 };
353 
354 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
355 {
356 	if (!capable(CAP_SYS_ADMIN))
357 		return NULL;
358 
359 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
360 			    current->comm, task_pid_nr(current));
361 
362 	return &bpf_probe_write_user_proto;
363 }
364 
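/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_probe_write_user(). Loading such a program requires CAP_SYS_ADMIN
 * and triggers the ratelimited warning above. Attach point and argument
 * index are hypothetical.
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int sketch_write_user(struct pt_regs *ctx)
 *	{
 *		static const char repl[] = "/dev/null";
 *		void *upath = (void *)PT_REGS_PARM2(ctx);
 *
 *		// Fails with -EPERM from kthreads or when uaccess is unsafe,
 *		// as checked in bpf_probe_write_user() above.
 *		bpf_probe_write_user(upath, repl, sizeof(repl));
 *		return 0;
 *	}
 */
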
365 #define MAX_TRACE_PRINTK_VARARGS	3
366 #define BPF_TRACE_PRINTK_SIZE		1024
367 
368 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
369 	   u64, arg2, u64, arg3)
370 {
371 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
372 	struct bpf_bprintf_data data = {
373 		.get_bin_args	= true,
374 		.get_buf	= true,
375 	};
376 	int ret;
377 
378 	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
379 				  MAX_TRACE_PRINTK_VARARGS, &data);
380 	if (ret < 0)
381 		return ret;
382 
383 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
384 
385 	trace_bpf_trace_printk(data.buf);
386 
387 	bpf_bprintf_cleanup(&data);
388 
389 	return ret;
390 }
391 
392 static const struct bpf_func_proto bpf_trace_printk_proto = {
393 	.func		= bpf_trace_printk,
394 	.gpl_only	= true,
395 	.ret_type	= RET_INTEGER,
396 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
397 	.arg2_type	= ARG_CONST_SIZE,
398 };
399 
400 static void __set_printk_clr_event(void)
401 {
402 	/*
403 	 * This program might be calling bpf_trace_printk,
404 	 * so enable the associated bpf_trace/bpf_trace_printk event.
405 	 * Repeat this each time, as it is possible a user has
406 	 * disabled bpf_trace_printk events. By loading a program that
407 	 * calls bpf_trace_printk(), however, the user has expressed
408 	 * the intent to see such events.
409 	 */
410 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
411 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
412 }
413 
414 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
415 {
416 	__set_printk_clr_event();
417 	return &bpf_trace_printk_proto;
418 }
419 
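/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_trace_printk(); output lands in the bpf_trace/bpf_trace_printk
 * event enabled by __set_printk_clr_event() above. Up to three u64
 * varargs are accepted (MAX_TRACE_PRINTK_VARARGS). Names are hypothetical.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int sketch_printk(void *ctx)
 *	{
 *		static const char fmt[] = "execve by tgid %d\n";
 *
 *		bpf_trace_printk(fmt, sizeof(fmt),
 *				 bpf_get_current_pid_tgid() >> 32);
 *		return 0;
 *	}
 */
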
420 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
421 	   u32, data_len)
422 {
423 	struct bpf_bprintf_data data = {
424 		.get_bin_args	= true,
425 		.get_buf	= true,
426 	};
427 	int ret, num_args;
428 
429 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
430 	    (data_len && !args))
431 		return -EINVAL;
432 	num_args = data_len / 8;
433 
434 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
435 	if (ret < 0)
436 		return ret;
437 
438 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
439 
440 	trace_bpf_trace_printk(data.buf);
441 
442 	bpf_bprintf_cleanup(&data);
443 
444 	return ret;
445 }
446 
447 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
448 	.func		= bpf_trace_vprintk,
449 	.gpl_only	= true,
450 	.ret_type	= RET_INTEGER,
451 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
452 	.arg2_type	= ARG_CONST_SIZE,
453 	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
454 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
455 };
456 
457 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
458 {
459 	__set_printk_clr_event();
460 	return &bpf_trace_vprintk_proto;
461 }
462 
463 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
464 	   const void *, args, u32, data_len)
465 {
466 	struct bpf_bprintf_data data = {
467 		.get_bin_args	= true,
468 	};
469 	int err, num_args;
470 
471 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
472 	    (data_len && !args))
473 		return -EINVAL;
474 	num_args = data_len / 8;
475 
476 	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
477 	if (err < 0)
478 		return err;
479 
480 	seq_bprintf(m, fmt, data.bin_args);
481 
482 	bpf_bprintf_cleanup(&data);
483 
484 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
485 }
486 
487 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
488 
489 static const struct bpf_func_proto bpf_seq_printf_proto = {
490 	.func		= bpf_seq_printf,
491 	.gpl_only	= true,
492 	.ret_type	= RET_INTEGER,
493 	.arg1_type	= ARG_PTR_TO_BTF_ID,
494 	.arg1_btf_id	= &btf_seq_file_ids[0],
495 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
496 	.arg3_type	= ARG_CONST_SIZE,
497 	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
498 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
499 };
500 
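/*
 * Illustrative BPF-iterator sketch (not part of this file) of
 * bpf_seq_printf(): the data array carries u64-sized varargs, so
 * data_len must be a multiple of 8 as checked above. Names are
 * hypothetical; the ctx layout comes from vmlinux.h.
 *
 *	SEC("iter/task")
 *	int sketch_dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *		static const char fmt[] = "%d %s\n";
 *		__u64 data[2];
 *
 *		if (!task)
 *			return 0;
 *		data[0] = task->pid;
 *		data[1] = (__u64)(long)task->comm;	// %s reads a kernel string
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), data, sizeof(data));
 *		return 0;
 *	}
 */
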
501 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
502 {
503 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
504 }
505 
506 static const struct bpf_func_proto bpf_seq_write_proto = {
507 	.func		= bpf_seq_write,
508 	.gpl_only	= true,
509 	.ret_type	= RET_INTEGER,
510 	.arg1_type	= ARG_PTR_TO_BTF_ID,
511 	.arg1_btf_id	= &btf_seq_file_ids[0],
512 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
513 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
514 };
515 
516 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
517 	   u32, btf_ptr_size, u64, flags)
518 {
519 	const struct btf *btf;
520 	s32 btf_id;
521 	int ret;
522 
523 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
524 	if (ret)
525 		return ret;
526 
527 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
528 }
529 
530 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
531 	.func		= bpf_seq_printf_btf,
532 	.gpl_only	= true,
533 	.ret_type	= RET_INTEGER,
534 	.arg1_type	= ARG_PTR_TO_BTF_ID,
535 	.arg1_btf_id	= &btf_seq_file_ids[0],
536 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
537 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
538 	.arg4_type	= ARG_ANYTHING,
539 };
540 
541 static __always_inline int
542 get_map_perf_counter(struct bpf_map *map, u64 flags,
543 		     u64 *value, u64 *enabled, u64 *running)
544 {
545 	struct bpf_array *array = container_of(map, struct bpf_array, map);
546 	unsigned int cpu = smp_processor_id();
547 	u64 index = flags & BPF_F_INDEX_MASK;
548 	struct bpf_event_entry *ee;
549 
550 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
551 		return -EINVAL;
552 	if (index == BPF_F_CURRENT_CPU)
553 		index = cpu;
554 	if (unlikely(index >= array->map.max_entries))
555 		return -E2BIG;
556 
557 	ee = READ_ONCE(array->ptrs[index]);
558 	if (!ee)
559 		return -ENOENT;
560 
561 	return perf_event_read_local(ee->event, value, enabled, running);
562 }
563 
564 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
565 {
566 	u64 value = 0;
567 	int err;
568 
569 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
570 	/*
571 	 * this api is ugly since we miss [-22..-2] range of valid
572 	 * counter values, but that's uapi
573 	 */
574 	if (err)
575 		return err;
576 	return value;
577 }
578 
579 static const struct bpf_func_proto bpf_perf_event_read_proto = {
580 	.func		= bpf_perf_event_read,
581 	.gpl_only	= true,
582 	.ret_type	= RET_INTEGER,
583 	.arg1_type	= ARG_CONST_MAP_PTR,
584 	.arg2_type	= ARG_ANYTHING,
585 };
586 
587 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
588 	   struct bpf_perf_event_value *, buf, u32, size)
589 {
590 	int err = -EINVAL;
591 
592 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
593 		goto clear;
594 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
595 				   &buf->running);
596 	if (unlikely(err))
597 		goto clear;
598 	return 0;
599 clear:
600 	memset(buf, 0, size);
601 	return err;
602 }
603 
604 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
605 	.func		= bpf_perf_event_read_value,
606 	.gpl_only	= true,
607 	.ret_type	= RET_INTEGER,
608 	.arg1_type	= ARG_CONST_MAP_PTR,
609 	.arg2_type	= ARG_ANYTHING,
610 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
611 	.arg4_type	= ARG_CONST_SIZE,
612 };
613 
614 static __always_inline u64
615 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
616 			u64 flags, struct perf_sample_data *sd)
617 {
618 	struct bpf_array *array = container_of(map, struct bpf_array, map);
619 	unsigned int cpu = smp_processor_id();
620 	u64 index = flags & BPF_F_INDEX_MASK;
621 	struct bpf_event_entry *ee;
622 	struct perf_event *event;
623 
624 	if (index == BPF_F_CURRENT_CPU)
625 		index = cpu;
626 	if (unlikely(index >= array->map.max_entries))
627 		return -E2BIG;
628 
629 	ee = READ_ONCE(array->ptrs[index]);
630 	if (!ee)
631 		return -ENOENT;
632 
633 	event = ee->event;
634 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
635 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
636 		return -EINVAL;
637 
638 	if (unlikely(event->oncpu != cpu))
639 		return -EOPNOTSUPP;
640 
641 	return perf_event_output(event, sd, regs);
642 }
643 
644 /*
645  * Support executing tracepoints in normal, irq, and nmi contexts, each of
646  * which may call bpf_perf_event_output()
647  */
648 struct bpf_trace_sample_data {
649 	struct perf_sample_data sds[3];
650 };
651 
652 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
653 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
654 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
655 	   u64, flags, void *, data, u64, size)
656 {
657 	struct bpf_trace_sample_data *sds;
658 	struct perf_raw_record raw = {
659 		.frag = {
660 			.size = size,
661 			.data = data,
662 		},
663 	};
664 	struct perf_sample_data *sd;
665 	int nest_level, err;
666 
667 	preempt_disable();
668 	sds = this_cpu_ptr(&bpf_trace_sds);
669 	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
670 
671 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
672 		err = -EBUSY;
673 		goto out;
674 	}
675 
676 	sd = &sds->sds[nest_level - 1];
677 
678 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
679 		err = -EINVAL;
680 		goto out;
681 	}
682 
683 	perf_sample_data_init(sd, 0, 0);
684 	perf_sample_save_raw_data(sd, &raw);
685 
686 	err = __bpf_perf_event_output(regs, map, flags, sd);
687 out:
688 	this_cpu_dec(bpf_trace_nest_level);
689 	preempt_enable();
690 	return err;
691 }
692 
693 static const struct bpf_func_proto bpf_perf_event_output_proto = {
694 	.func		= bpf_perf_event_output,
695 	.gpl_only	= true,
696 	.ret_type	= RET_INTEGER,
697 	.arg1_type	= ARG_PTR_TO_CTX,
698 	.arg2_type	= ARG_CONST_MAP_PTR,
699 	.arg3_type	= ARG_ANYTHING,
700 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
701 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
702 };
703 
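/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_perf_event_output() against a BPF_MAP_TYPE_PERF_EVENT_ARRAY;
 * BPF_F_CURRENT_CPU selects the perf event of the current CPU, as
 * handled in __bpf_perf_event_output() above. Names are hypothetical.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(__u32));
 *	} events SEC(".maps");
 *
 *	struct sample { __u32 tgid; };
 *
 *	SEC("kprobe/do_nanosleep")
 *	int sketch_output(struct pt_regs *ctx)
 *	{
 *		struct sample s = { .tgid = bpf_get_current_pid_tgid() >> 32 };
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &s, sizeof(s));
 *		return 0;
 *	}
 */
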
704 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
705 struct bpf_nested_pt_regs {
706 	struct pt_regs regs[3];
707 };
708 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
709 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
710 
711 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
712 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
713 {
714 	struct perf_raw_frag frag = {
715 		.copy		= ctx_copy,
716 		.size		= ctx_size,
717 		.data		= ctx,
718 	};
719 	struct perf_raw_record raw = {
720 		.frag = {
721 			{
722 				.next	= ctx_size ? &frag : NULL,
723 			},
724 			.size	= meta_size,
725 			.data	= meta,
726 		},
727 	};
728 	struct perf_sample_data *sd;
729 	struct pt_regs *regs;
730 	int nest_level;
731 	u64 ret;
732 
733 	preempt_disable();
734 	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
735 
736 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
737 		ret = -EBUSY;
738 		goto out;
739 	}
740 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
741 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
742 
743 	perf_fetch_caller_regs(regs);
744 	perf_sample_data_init(sd, 0, 0);
745 	perf_sample_save_raw_data(sd, &raw);
746 
747 	ret = __bpf_perf_event_output(regs, map, flags, sd);
748 out:
749 	this_cpu_dec(bpf_event_output_nest_level);
750 	preempt_enable();
751 	return ret;
752 }
753 
754 BPF_CALL_0(bpf_get_current_task)
755 {
756 	return (long) current;
757 }
758 
759 const struct bpf_func_proto bpf_get_current_task_proto = {
760 	.func		= bpf_get_current_task,
761 	.gpl_only	= true,
762 	.ret_type	= RET_INTEGER,
763 };
764 
765 BPF_CALL_0(bpf_get_current_task_btf)
766 {
767 	return (unsigned long) current;
768 }
769 
770 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
771 	.func		= bpf_get_current_task_btf,
772 	.gpl_only	= true,
773 	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
774 	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
775 };
776 
777 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
778 {
779 	return (unsigned long) task_pt_regs(task);
780 }
781 
782 BTF_ID_LIST(bpf_task_pt_regs_ids)
783 BTF_ID(struct, pt_regs)
784 
785 const struct bpf_func_proto bpf_task_pt_regs_proto = {
786 	.func		= bpf_task_pt_regs,
787 	.gpl_only	= true,
788 	.arg1_type	= ARG_PTR_TO_BTF_ID,
789 	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
790 	.ret_type	= RET_PTR_TO_BTF_ID,
791 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
792 };
793 
794 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
795 {
796 	struct bpf_array *array = container_of(map, struct bpf_array, map);
797 	struct cgroup *cgrp;
798 
799 	if (unlikely(idx >= array->map.max_entries))
800 		return -E2BIG;
801 
802 	cgrp = READ_ONCE(array->ptrs[idx]);
803 	if (unlikely(!cgrp))
804 		return -EAGAIN;
805 
806 	return task_under_cgroup_hierarchy(current, cgrp);
807 }
808 
809 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
810 	.func           = bpf_current_task_under_cgroup,
811 	.gpl_only       = false,
812 	.ret_type       = RET_INTEGER,
813 	.arg1_type      = ARG_CONST_MAP_PTR,
814 	.arg2_type      = ARG_ANYTHING,
815 };
816 
817 struct send_signal_irq_work {
818 	struct irq_work irq_work;
819 	struct task_struct *task;
820 	u32 sig;
821 	enum pid_type type;
822 };
823 
824 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
825 
826 static void do_bpf_send_signal(struct irq_work *entry)
827 {
828 	struct send_signal_irq_work *work;
829 
830 	work = container_of(entry, struct send_signal_irq_work, irq_work);
831 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
832 	put_task_struct(work->task);
833 }
834 
835 static int bpf_send_signal_common(u32 sig, enum pid_type type)
836 {
837 	struct send_signal_irq_work *work = NULL;
838 
839 	/* Similar to bpf_probe_write_user, the task needs to be
840 	 * in a sound condition and kernel memory access must be
841 	 * permitted in order to send a signal to the current
842 	 * task.
843 	 */
844 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
845 		return -EPERM;
846 	if (unlikely(!nmi_uaccess_okay()))
847 		return -EPERM;
848 	/* Task should not be pid=1 to avoid kernel panic. */
849 	if (unlikely(is_global_init(current)))
850 		return -EPERM;
851 
852 	if (irqs_disabled()) {
853 		/* Do an early check on signal validity. Otherwise,
854 		 * the error is lost in deferred irq_work.
855 		 */
856 		if (unlikely(!valid_signal(sig)))
857 			return -EINVAL;
858 
859 		work = this_cpu_ptr(&send_signal_work);
860 		if (irq_work_is_busy(&work->irq_work))
861 			return -EBUSY;
862 
863 		/* Add the current task, which is the target of sending signal,
864 		 * to the irq_work. The current task may change when queued
865 		 * irq works get executed.
866 		 */
867 		work->task = get_task_struct(current);
868 		work->sig = sig;
869 		work->type = type;
870 		irq_work_queue(&work->irq_work);
871 		return 0;
872 	}
873 
874 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
875 }
876 
877 BPF_CALL_1(bpf_send_signal, u32, sig)
878 {
879 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
880 }
881 
882 static const struct bpf_func_proto bpf_send_signal_proto = {
883 	.func		= bpf_send_signal,
884 	.gpl_only	= false,
885 	.ret_type	= RET_INTEGER,
886 	.arg1_type	= ARG_ANYTHING,
887 };
888 
889 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
890 {
891 	return bpf_send_signal_common(sig, PIDTYPE_PID);
892 }
893 
894 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
895 	.func		= bpf_send_signal_thread,
896 	.gpl_only	= false,
897 	.ret_type	= RET_INTEGER,
898 	.arg1_type	= ARG_ANYTHING,
899 };
900 
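/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_send_signal(): it delivers a signal to the whole thread group of
 * current, while bpf_send_signal_thread() targets only the calling
 * thread. The attach point is hypothetical.
 *
 *	SEC("kprobe/do_unlinkat")
 *	int sketch_signal(struct pt_regs *ctx)
 *	{
 *		// 10 is SIGUSR1 on x86-64; fails with -EPERM for kthreads,
 *		// exiting tasks and pid 1, as checked above.
 *		bpf_send_signal(10);
 *		return 0;
 *	}
 */
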
901 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
902 {
903 	struct path copy;
904 	long len;
905 	char *p;
906 
907 	if (!sz)
908 		return 0;
909 
910 	/*
911 	 * The path pointer is verified as trusted and safe to use,
912 	 * but let's double-check it's valid anyway to work around a
913 	 * potentially broken verifier.
914 	 */
915 	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
916 	if (len < 0)
917 		return len;
918 
919 	p = d_path(&copy, buf, sz);
920 	if (IS_ERR(p)) {
921 		len = PTR_ERR(p);
922 	} else {
923 		len = buf + sz - p;
924 		memmove(buf, p, len);
925 	}
926 
927 	return len;
928 }
929 
930 BTF_SET_START(btf_allowlist_d_path)
931 #ifdef CONFIG_SECURITY
932 BTF_ID(func, security_file_permission)
933 BTF_ID(func, security_inode_getattr)
934 BTF_ID(func, security_file_open)
935 #endif
936 #ifdef CONFIG_SECURITY_PATH
937 BTF_ID(func, security_path_truncate)
938 #endif
939 BTF_ID(func, vfs_truncate)
940 BTF_ID(func, vfs_fallocate)
941 BTF_ID(func, dentry_open)
942 BTF_ID(func, vfs_getattr)
943 BTF_ID(func, filp_close)
944 BTF_SET_END(btf_allowlist_d_path)
945 
946 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
947 {
948 	if (prog->type == BPF_PROG_TYPE_TRACING &&
949 	    prog->expected_attach_type == BPF_TRACE_ITER)
950 		return true;
951 
952 	if (prog->type == BPF_PROG_TYPE_LSM)
953 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
954 
955 	return btf_id_set_contains(&btf_allowlist_d_path,
956 				   prog->aux->attach_btf_id);
957 }
958 
959 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
960 
961 static const struct bpf_func_proto bpf_d_path_proto = {
962 	.func		= bpf_d_path,
963 	.gpl_only	= false,
964 	.ret_type	= RET_INTEGER,
965 	.arg1_type	= ARG_PTR_TO_BTF_ID,
966 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
967 	.arg2_type	= ARG_PTR_TO_MEM,
968 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
969 	.allowed	= bpf_d_path_allowed,
970 };
971 
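/*
 * Illustrative BPF-side sketch (not part of this file) of bpf_d_path()
 * from an fentry program on vfs_truncate(), which is in the allowlist
 * above. BPF_PROG and bpf_printk come from libbpf headers; names are
 * hypothetical.
 *
 *	SEC("fentry/vfs_truncate")
 *	int BPF_PROG(sketch_d_path, const struct path *path)
 *	{
 *		char buf[64];
 *		long n;
 *
 *		n = bpf_d_path((struct path *)path, buf, sizeof(buf));
 *		if (n > 0)
 *			bpf_printk("truncate: %s", buf);
 *		return 0;
 *	}
 */
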
972 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
973 			 BTF_F_PTR_RAW | BTF_F_ZERO)
974 
975 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
976 				  u64 flags, const struct btf **btf,
977 				  s32 *btf_id)
978 {
979 	const struct btf_type *t;
980 
981 	if (unlikely(flags & ~(BTF_F_ALL)))
982 		return -EINVAL;
983 
984 	if (btf_ptr_size != sizeof(struct btf_ptr))
985 		return -EINVAL;
986 
987 	*btf = bpf_get_btf_vmlinux();
988 
989 	if (IS_ERR_OR_NULL(*btf))
990 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
991 
992 	if (ptr->type_id > 0)
993 		*btf_id = ptr->type_id;
994 	else
995 		return -EINVAL;
996 
997 	if (*btf_id > 0)
998 		t = btf_type_by_id(*btf, *btf_id);
999 	if (*btf_id <= 0 || !t)
1000 		return -ENOENT;
1001 
1002 	return 0;
1003 }
1004 
1005 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1006 	   u32, btf_ptr_size, u64, flags)
1007 {
1008 	const struct btf *btf;
1009 	s32 btf_id;
1010 	int ret;
1011 
1012 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1013 	if (ret)
1014 		return ret;
1015 
1016 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1017 				      flags);
1018 }
1019 
1020 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1021 	.func		= bpf_snprintf_btf,
1022 	.gpl_only	= false,
1023 	.ret_type	= RET_INTEGER,
1024 	.arg1_type	= ARG_PTR_TO_MEM,
1025 	.arg2_type	= ARG_CONST_SIZE,
1026 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1027 	.arg4_type	= ARG_CONST_SIZE,
1028 	.arg5_type	= ARG_ANYTHING,
1029 };
1030 
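/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_snprintf_btf(): the struct btf_ptr carries the object pointer and
 * its vmlinux BTF type id, resolved here with the CO-RE helper
 * bpf_core_type_id_kernel(). Names are hypothetical.
 *
 *	SEC("kprobe/do_nanosleep")
 *	int sketch_btf_show(struct pt_regs *regs)
 *	{
 *		struct btf_ptr p = {};
 *		static char out[512];
 *
 *		p.ptr = bpf_get_current_task_btf();
 *		p.type_id = bpf_core_type_id_kernel(struct task_struct);
 *		bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
 *		return 0;
 *	}
 */
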
1031 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1032 {
1033 	/* This helper call is inlined by verifier. */
1034 	return ((u64 *)ctx)[-2];
1035 }
1036 
1037 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1038 	.func		= bpf_get_func_ip_tracing,
1039 	.gpl_only	= true,
1040 	.ret_type	= RET_INTEGER,
1041 	.arg1_type	= ARG_PTR_TO_CTX,
1042 };
1043 
1044 #ifdef CONFIG_X86_KERNEL_IBT
1045 static unsigned long get_entry_ip(unsigned long fentry_ip)
1046 {
1047 	u32 instr;
1048 
1049 	/* Being extra safe in here in case entry ip is on the page-edge. */
1050 	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1051 		return fentry_ip;
1052 	if (is_endbr(instr))
1053 		fentry_ip -= ENDBR_INSN_SIZE;
1054 	return fentry_ip;
1055 }
1056 #else
1057 #define get_entry_ip(fentry_ip) fentry_ip
1058 #endif
1059 
1060 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1061 {
1062 	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1063 	struct kprobe *kp;
1064 
1065 #ifdef CONFIG_UPROBES
1066 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1067 	if (run_ctx->is_uprobe)
1068 		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1069 #endif
1070 
1071 	kp = kprobe_running();
1072 
1073 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1074 		return 0;
1075 
1076 	return get_entry_ip((uintptr_t)kp->addr);
1077 }
1078 
1079 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1080 	.func		= bpf_get_func_ip_kprobe,
1081 	.gpl_only	= true,
1082 	.ret_type	= RET_INTEGER,
1083 	.arg1_type	= ARG_PTR_TO_CTX,
1084 };
1085 
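/*
 * Illustrative BPF-side sketch (not part of this file) of
 * bpf_get_func_ip() in a kprobe program attached at a function entry:
 * it returns the probed function's address, adjusted for the ENDBR
 * instruction on IBT kernels by get_entry_ip() above. Names are
 * hypothetical.
 *
 *	SEC("kprobe/do_nanosleep")
 *	int sketch_func_ip(struct pt_regs *ctx)
 *	{
 *		__u64 ip = bpf_get_func_ip(ctx);
 *
 *		bpf_printk("entry ip: %llx", ip);
 *		return 0;
 *	}
 */
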
1086 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1087 {
1088 	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1089 }
1090 
1091 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1092 	.func		= bpf_get_func_ip_kprobe_multi,
1093 	.gpl_only	= false,
1094 	.ret_type	= RET_INTEGER,
1095 	.arg1_type	= ARG_PTR_TO_CTX,
1096 };
1097 
1098 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1099 {
1100 	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1101 }
1102 
1103 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1104 	.func		= bpf_get_attach_cookie_kprobe_multi,
1105 	.gpl_only	= false,
1106 	.ret_type	= RET_INTEGER,
1107 	.arg1_type	= ARG_PTR_TO_CTX,
1108 };
1109 
1110 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1111 {
1112 	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1113 }
1114 
1115 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1116 	.func		= bpf_get_func_ip_uprobe_multi,
1117 	.gpl_only	= false,
1118 	.ret_type	= RET_INTEGER,
1119 	.arg1_type	= ARG_PTR_TO_CTX,
1120 };
1121 
1122 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1123 {
1124 	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1125 }
1126 
1127 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1128 	.func		= bpf_get_attach_cookie_uprobe_multi,
1129 	.gpl_only	= false,
1130 	.ret_type	= RET_INTEGER,
1131 	.arg1_type	= ARG_PTR_TO_CTX,
1132 };
1133 
1134 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1135 {
1136 	struct bpf_trace_run_ctx *run_ctx;
1137 
1138 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1139 	return run_ctx->bpf_cookie;
1140 }
1141 
1142 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1143 	.func		= bpf_get_attach_cookie_trace,
1144 	.gpl_only	= false,
1145 	.ret_type	= RET_INTEGER,
1146 	.arg1_type	= ARG_PTR_TO_CTX,
1147 };
1148 
1149 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1150 {
1151 	return ctx->event->bpf_cookie;
1152 }
1153 
1154 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1155 	.func		= bpf_get_attach_cookie_pe,
1156 	.gpl_only	= false,
1157 	.ret_type	= RET_INTEGER,
1158 	.arg1_type	= ARG_PTR_TO_CTX,
1159 };
1160 
1161 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1162 {
1163 	struct bpf_trace_run_ctx *run_ctx;
1164 
1165 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1166 	return run_ctx->bpf_cookie;
1167 }
1168 
1169 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1170 	.func		= bpf_get_attach_cookie_tracing,
1171 	.gpl_only	= false,
1172 	.ret_type	= RET_INTEGER,
1173 	.arg1_type	= ARG_PTR_TO_CTX,
1174 };
1175 
1176 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1177 {
1178 #ifndef CONFIG_X86
1179 	return -ENOENT;
1180 #else
1181 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1182 	u32 entry_cnt = size / br_entry_size;
1183 
1184 	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1185 
1186 	if (unlikely(flags))
1187 		return -EINVAL;
1188 
1189 	if (!entry_cnt)
1190 		return -ENOENT;
1191 
1192 	return entry_cnt * br_entry_size;
1193 #endif
1194 }
1195 
1196 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1197 	.func		= bpf_get_branch_snapshot,
1198 	.gpl_only	= true,
1199 	.ret_type	= RET_INTEGER,
1200 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1201 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1202 };
1203 
1204 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1205 {
1206 	/* This helper call is inlined by verifier. */
1207 	u64 nr_args = ((u64 *)ctx)[-1];
1208 
1209 	if ((u64) n >= nr_args)
1210 		return -EINVAL;
1211 	*value = ((u64 *)ctx)[n];
1212 	return 0;
1213 }
1214 
1215 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1216 	.func		= get_func_arg,
1217 	.ret_type	= RET_INTEGER,
1218 	.arg1_type	= ARG_PTR_TO_CTX,
1219 	.arg2_type	= ARG_ANYTHING,
1220 	.arg3_type	= ARG_PTR_TO_LONG,
1221 };
1222 
1223 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1224 {
1225 	/* This helper call is inlined by verifier. */
1226 	u64 nr_args = ((u64 *)ctx)[-1];
1227 
1228 	*value = ((u64 *)ctx)[nr_args];
1229 	return 0;
1230 }
1231 
1232 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1233 	.func		= get_func_ret,
1234 	.ret_type	= RET_INTEGER,
1235 	.arg1_type	= ARG_PTR_TO_CTX,
1236 	.arg2_type	= ARG_PTR_TO_LONG,
1237 };
1238 
1239 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1240 {
1241 	/* This helper call is inlined by verifier. */
1242 	return ((u64 *)ctx)[-1];
1243 }
1244 
1245 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1246 	.func		= get_func_arg_cnt,
1247 	.ret_type	= RET_INTEGER,
1248 	.arg1_type	= ARG_PTR_TO_CTX,
1249 };
1250 
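/*
 * Illustrative BPF-side sketch (not part of this file) of the three
 * helpers above in an fexit program; bpf_get_func_ret() is only
 * meaningful on fexit/fmod_ret attachments. Names are hypothetical.
 *
 *	SEC("fexit/do_unlinkat")
 *	int BPF_PROG(sketch_args)
 *	{
 *		__u64 nr = bpf_get_func_arg_cnt(ctx);
 *		__u64 arg0 = 0, ret = 0;
 *
 *		bpf_get_func_arg(ctx, 0, &arg0);	// first argument (dfd)
 *		bpf_get_func_ret(ctx, &ret);		// traced function's return value
 *		bpf_printk("nargs=%llu arg0=%llu ret=%llu", nr, arg0, ret);
 *		return 0;
 *	}
 */
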
1251 #ifdef CONFIG_KEYS
1252 __diag_push();
1253 __diag_ignore_all("-Wmissing-prototypes",
1254 		  "kfuncs which will be used in BPF programs");
1255 
1256 /**
1257  * bpf_lookup_user_key - lookup a key by its serial
1258  * @serial: key handle serial number
1259  * @flags: lookup-specific flags
1260  *
1261  * Search a key with a given *serial* and the provided *flags*.
1262  * If found, increment the reference count of the key by one, and
1263  * return it in the bpf_key structure.
1264  *
1265  * The bpf_key structure must be passed to bpf_key_put() when done
1266  * with it, so that the key reference count is decremented and the
1267  * bpf_key structure is freed.
1268  *
1269  * Permission checks are deferred to the time the key is used by
1270  * one of the available key-specific kfuncs.
1271  *
1272  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1273  * special keyring (e.g. session keyring), if it doesn't yet exist.
1274  * Set *flags* with KEY_LOOKUP_PARTIAL, to look up a key without waiting
1275  * for the key construction, and to retrieve uninstantiated keys (keys
1276  * without data attached to them).
1277  *
1278  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1279  *         NULL pointer otherwise.
1280  */
1281 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1282 {
1283 	key_ref_t key_ref;
1284 	struct bpf_key *bkey;
1285 
1286 	if (flags & ~KEY_LOOKUP_ALL)
1287 		return NULL;
1288 
1289 	/*
1290 	 * Permission check is deferred until the key is used, as the
1291 	 * intent of the caller is unknown here.
1292 	 */
1293 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1294 	if (IS_ERR(key_ref))
1295 		return NULL;
1296 
1297 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1298 	if (!bkey) {
1299 		key_put(key_ref_to_ptr(key_ref));
1300 		return NULL;
1301 	}
1302 
1303 	bkey->key = key_ref_to_ptr(key_ref);
1304 	bkey->has_ref = true;
1305 
1306 	return bkey;
1307 }
1308 
1309 /**
1310  * bpf_lookup_system_key - lookup a key by a system-defined ID
1311  * @id: key ID
1312  *
1313  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1314  * The key pointer is marked as invalid, to prevent bpf_key_put() from
1315  * attempting to decrement the key reference count on that pointer. The key
1316  * pointer set in such a way is currently understood only by
1317  * verify_pkcs7_signature().
1318  *
1319  * Set *id* to one of the values defined in include/linux/verification.h:
1320  * 0 for the primary keyring (immutable keyring of system keys);
1321  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1322  * (where keys can be added only if they are vouched for by existing keys
1323  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1324  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1325  * kernel image and, possibly, the initramfs signature).
1326  *
1327  * Return: a bpf_key pointer with an invalid key pointer set from the
1328  *         pre-determined ID on success, a NULL pointer otherwise
1329  */
1330 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1331 {
1332 	struct bpf_key *bkey;
1333 
1334 	if (system_keyring_id_check(id) < 0)
1335 		return NULL;
1336 
1337 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1338 	if (!bkey)
1339 		return NULL;
1340 
1341 	bkey->key = (struct key *)(unsigned long)id;
1342 	bkey->has_ref = false;
1343 
1344 	return bkey;
1345 }
1346 
1347 /**
1348  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1349  * @bkey: bpf_key structure
1350  *
1351  * Decrement the reference count of the key inside *bkey*, if the pointer
1352  * is valid, and free *bkey*.
1353  */
1354 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1355 {
1356 	if (bkey->has_ref)
1357 		key_put(bkey->key);
1358 
1359 	kfree(bkey);
1360 }
1361 
1362 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1363 /**
1364  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1365  * @data_ptr: data to verify
1366  * @sig_ptr: signature of the data
1367  * @trusted_keyring: keyring with keys trusted for signature verification
1368  *
1369  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1370  * with keys in a keyring referenced by *trusted_keyring*.
1371  *
1372  * Return: 0 on success, a negative value on error.
1373  */
1374 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1375 			       struct bpf_dynptr_kern *sig_ptr,
1376 			       struct bpf_key *trusted_keyring)
1377 {
1378 	int ret;
1379 
1380 	if (trusted_keyring->has_ref) {
1381 		/*
1382 		 * Do the permission check deferred in bpf_lookup_user_key().
1383 		 * See bpf_lookup_user_key() for more details.
1384 		 *
1385 		 * A call to key_task_permission() here would be redundant, as
1386 		 * it is already done by keyring_search() called by
1387 		 * find_asymmetric_key().
1388 		 */
1389 		ret = key_validate(trusted_keyring->key);
1390 		if (ret < 0)
1391 			return ret;
1392 	}
1393 
1394 	return verify_pkcs7_signature(data_ptr->data,
1395 				      __bpf_dynptr_size(data_ptr),
1396 				      sig_ptr->data,
1397 				      __bpf_dynptr_size(sig_ptr),
1398 				      trusted_keyring->key,
1399 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1400 				      NULL);
1401 }
1402 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1403 
1404 __diag_pop();
1405 
1406 BTF_SET8_START(key_sig_kfunc_set)
1407 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1408 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1409 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1410 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1411 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1412 #endif
1413 BTF_SET8_END(key_sig_kfunc_set)
1414 
1415 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1416 	.owner = THIS_MODULE,
1417 	.set = &key_sig_kfunc_set,
1418 };
1419 
1420 static int __init bpf_key_sig_kfuncs_init(void)
1421 {
1422 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1423 					 &bpf_key_sig_kfunc_set);
1424 }
1425 
1426 late_initcall(bpf_key_sig_kfuncs_init);
1427 #endif /* CONFIG_KEYS */
1428 
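/*
 * Illustrative BPF-side sketch (not part of this file) of the key kfuncs
 * above from a sleepable program, where bpf_lookup_user_key() is allowed
 * to sleep. The hook and key serial number are hypothetical.
 *
 *	extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
 *	extern void bpf_key_put(struct bpf_key *bkey) __ksym;
 *
 *	SEC("lsm.s/bpf")
 *	int BPF_PROG(sketch_key, int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		struct bpf_key *bkey;
 *
 *		bkey = bpf_lookup_user_key(0x12345678, 0);	// serial is hypothetical
 *		if (!bkey)
 *			return 0;
 *		// The key could now be handed to bpf_verify_pkcs7_signature()
 *		// together with two dynptrs holding the data and its signature.
 *		bpf_key_put(bkey);
 *		return 0;
 *	}
 */
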
1429 static const struct bpf_func_proto *
1430 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1431 {
1432 	switch (func_id) {
1433 	case BPF_FUNC_map_lookup_elem:
1434 		return &bpf_map_lookup_elem_proto;
1435 	case BPF_FUNC_map_update_elem:
1436 		return &bpf_map_update_elem_proto;
1437 	case BPF_FUNC_map_delete_elem:
1438 		return &bpf_map_delete_elem_proto;
1439 	case BPF_FUNC_map_push_elem:
1440 		return &bpf_map_push_elem_proto;
1441 	case BPF_FUNC_map_pop_elem:
1442 		return &bpf_map_pop_elem_proto;
1443 	case BPF_FUNC_map_peek_elem:
1444 		return &bpf_map_peek_elem_proto;
1445 	case BPF_FUNC_map_lookup_percpu_elem:
1446 		return &bpf_map_lookup_percpu_elem_proto;
1447 	case BPF_FUNC_ktime_get_ns:
1448 		return &bpf_ktime_get_ns_proto;
1449 	case BPF_FUNC_ktime_get_boot_ns:
1450 		return &bpf_ktime_get_boot_ns_proto;
1451 	case BPF_FUNC_tail_call:
1452 		return &bpf_tail_call_proto;
1453 	case BPF_FUNC_get_current_pid_tgid:
1454 		return &bpf_get_current_pid_tgid_proto;
1455 	case BPF_FUNC_get_current_task:
1456 		return &bpf_get_current_task_proto;
1457 	case BPF_FUNC_get_current_task_btf:
1458 		return &bpf_get_current_task_btf_proto;
1459 	case BPF_FUNC_task_pt_regs:
1460 		return &bpf_task_pt_regs_proto;
1461 	case BPF_FUNC_get_current_uid_gid:
1462 		return &bpf_get_current_uid_gid_proto;
1463 	case BPF_FUNC_get_current_comm:
1464 		return &bpf_get_current_comm_proto;
1465 	case BPF_FUNC_trace_printk:
1466 		return bpf_get_trace_printk_proto();
1467 	case BPF_FUNC_get_smp_processor_id:
1468 		return &bpf_get_smp_processor_id_proto;
1469 	case BPF_FUNC_get_numa_node_id:
1470 		return &bpf_get_numa_node_id_proto;
1471 	case BPF_FUNC_perf_event_read:
1472 		return &bpf_perf_event_read_proto;
1473 	case BPF_FUNC_current_task_under_cgroup:
1474 		return &bpf_current_task_under_cgroup_proto;
1475 	case BPF_FUNC_get_prandom_u32:
1476 		return &bpf_get_prandom_u32_proto;
1477 	case BPF_FUNC_probe_write_user:
1478 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1479 		       NULL : bpf_get_probe_write_proto();
1480 	case BPF_FUNC_probe_read_user:
1481 		return &bpf_probe_read_user_proto;
1482 	case BPF_FUNC_probe_read_kernel:
1483 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1484 		       NULL : &bpf_probe_read_kernel_proto;
1485 	case BPF_FUNC_probe_read_user_str:
1486 		return &bpf_probe_read_user_str_proto;
1487 	case BPF_FUNC_probe_read_kernel_str:
1488 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1489 		       NULL : &bpf_probe_read_kernel_str_proto;
1490 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1491 	case BPF_FUNC_probe_read:
1492 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1493 		       NULL : &bpf_probe_read_compat_proto;
1494 	case BPF_FUNC_probe_read_str:
1495 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1496 		       NULL : &bpf_probe_read_compat_str_proto;
1497 #endif
1498 #ifdef CONFIG_CGROUPS
1499 	case BPF_FUNC_cgrp_storage_get:
1500 		return &bpf_cgrp_storage_get_proto;
1501 	case BPF_FUNC_cgrp_storage_delete:
1502 		return &bpf_cgrp_storage_delete_proto;
1503 #endif
1504 	case BPF_FUNC_send_signal:
1505 		return &bpf_send_signal_proto;
1506 	case BPF_FUNC_send_signal_thread:
1507 		return &bpf_send_signal_thread_proto;
1508 	case BPF_FUNC_perf_event_read_value:
1509 		return &bpf_perf_event_read_value_proto;
1510 	case BPF_FUNC_get_ns_current_pid_tgid:
1511 		return &bpf_get_ns_current_pid_tgid_proto;
1512 	case BPF_FUNC_ringbuf_output:
1513 		return &bpf_ringbuf_output_proto;
1514 	case BPF_FUNC_ringbuf_reserve:
1515 		return &bpf_ringbuf_reserve_proto;
1516 	case BPF_FUNC_ringbuf_submit:
1517 		return &bpf_ringbuf_submit_proto;
1518 	case BPF_FUNC_ringbuf_discard:
1519 		return &bpf_ringbuf_discard_proto;
1520 	case BPF_FUNC_ringbuf_query:
1521 		return &bpf_ringbuf_query_proto;
1522 	case BPF_FUNC_jiffies64:
1523 		return &bpf_jiffies64_proto;
1524 	case BPF_FUNC_get_task_stack:
1525 		return &bpf_get_task_stack_proto;
1526 	case BPF_FUNC_copy_from_user:
1527 		return &bpf_copy_from_user_proto;
1528 	case BPF_FUNC_copy_from_user_task:
1529 		return &bpf_copy_from_user_task_proto;
1530 	case BPF_FUNC_snprintf_btf:
1531 		return &bpf_snprintf_btf_proto;
1532 	case BPF_FUNC_per_cpu_ptr:
1533 		return &bpf_per_cpu_ptr_proto;
1534 	case BPF_FUNC_this_cpu_ptr:
1535 		return &bpf_this_cpu_ptr_proto;
1536 	case BPF_FUNC_task_storage_get:
1537 		if (bpf_prog_check_recur(prog))
1538 			return &bpf_task_storage_get_recur_proto;
1539 		return &bpf_task_storage_get_proto;
1540 	case BPF_FUNC_task_storage_delete:
1541 		if (bpf_prog_check_recur(prog))
1542 			return &bpf_task_storage_delete_recur_proto;
1543 		return &bpf_task_storage_delete_proto;
1544 	case BPF_FUNC_for_each_map_elem:
1545 		return &bpf_for_each_map_elem_proto;
1546 	case BPF_FUNC_snprintf:
1547 		return &bpf_snprintf_proto;
1548 	case BPF_FUNC_get_func_ip:
1549 		return &bpf_get_func_ip_proto_tracing;
1550 	case BPF_FUNC_get_branch_snapshot:
1551 		return &bpf_get_branch_snapshot_proto;
1552 	case BPF_FUNC_find_vma:
1553 		return &bpf_find_vma_proto;
1554 	case BPF_FUNC_trace_vprintk:
1555 		return bpf_get_trace_vprintk_proto();
1556 	default:
1557 		return bpf_base_func_proto(func_id);
1558 	}
1559 }
1560 
1561 static const struct bpf_func_proto *
1562 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1563 {
1564 	switch (func_id) {
1565 	case BPF_FUNC_perf_event_output:
1566 		return &bpf_perf_event_output_proto;
1567 	case BPF_FUNC_get_stackid:
1568 		return &bpf_get_stackid_proto;
1569 	case BPF_FUNC_get_stack:
1570 		return &bpf_get_stack_proto;
1571 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1572 	case BPF_FUNC_override_return:
1573 		return &bpf_override_return_proto;
1574 #endif
1575 	case BPF_FUNC_get_func_ip:
1576 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1577 			return &bpf_get_func_ip_proto_kprobe_multi;
1578 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1579 			return &bpf_get_func_ip_proto_uprobe_multi;
1580 		return &bpf_get_func_ip_proto_kprobe;
1581 	case BPF_FUNC_get_attach_cookie:
1582 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1583 			return &bpf_get_attach_cookie_proto_kmulti;
1584 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1585 			return &bpf_get_attach_cookie_proto_umulti;
1586 		return &bpf_get_attach_cookie_proto_trace;
1587 	default:
1588 		return bpf_tracing_func_proto(func_id, prog);
1589 	}
1590 }
1591 
1592 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
1593 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1594 					const struct bpf_prog *prog,
1595 					struct bpf_insn_access_aux *info)
1596 {
1597 	if (off < 0 || off >= sizeof(struct pt_regs))
1598 		return false;
1599 	if (type != BPF_READ)
1600 		return false;
1601 	if (off % size != 0)
1602 		return false;
1603 	/*
1604 	 * Assertion for 32 bit to make sure last 8 byte access
1605 	 * (BPF_DW) to the last 4 byte member is disallowed.
1606 	 */
1607 	if (off + size > sizeof(struct pt_regs))
1608 		return false;
1609 
1610 	return true;
1611 }
1612 
1613 const struct bpf_verifier_ops kprobe_verifier_ops = {
1614 	.get_func_proto  = kprobe_prog_func_proto,
1615 	.is_valid_access = kprobe_prog_is_valid_access,
1616 };
1617 
1618 const struct bpf_prog_ops kprobe_prog_ops = {
1619 };
1620 
1621 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1622 	   u64, flags, void *, data, u64, size)
1623 {
1624 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1625 
1626 	/*
1627 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1628 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1629 	 * from there and call the same bpf_perf_event_output() helper inline.
1630 	 */
1631 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1632 }
1633 
1634 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1635 	.func		= bpf_perf_event_output_tp,
1636 	.gpl_only	= true,
1637 	.ret_type	= RET_INTEGER,
1638 	.arg1_type	= ARG_PTR_TO_CTX,
1639 	.arg2_type	= ARG_CONST_MAP_PTR,
1640 	.arg3_type	= ARG_ANYTHING,
1641 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1642 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1643 };
1644 
1645 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1646 	   u64, flags)
1647 {
1648 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1649 
1650 	/*
1651 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1652 	 * the other helper's function body cannot be inlined due to being
1653 	 * external, thus we need to call raw helper function.
1654 	 */
1655 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1656 			       flags, 0, 0);
1657 }
1658 
1659 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1660 	.func		= bpf_get_stackid_tp,
1661 	.gpl_only	= true,
1662 	.ret_type	= RET_INTEGER,
1663 	.arg1_type	= ARG_PTR_TO_CTX,
1664 	.arg2_type	= ARG_CONST_MAP_PTR,
1665 	.arg3_type	= ARG_ANYTHING,
1666 };
1667 
1668 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1669 	   u64, flags)
1670 {
1671 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1672 
1673 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1674 			     (unsigned long) size, flags, 0);
1675 }
1676 
1677 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1678 	.func		= bpf_get_stack_tp,
1679 	.gpl_only	= true,
1680 	.ret_type	= RET_INTEGER,
1681 	.arg1_type	= ARG_PTR_TO_CTX,
1682 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1683 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1684 	.arg4_type	= ARG_ANYTHING,
1685 };
1686 
1687 static const struct bpf_func_proto *
1688 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1689 {
1690 	switch (func_id) {
1691 	case BPF_FUNC_perf_event_output:
1692 		return &bpf_perf_event_output_proto_tp;
1693 	case BPF_FUNC_get_stackid:
1694 		return &bpf_get_stackid_proto_tp;
1695 	case BPF_FUNC_get_stack:
1696 		return &bpf_get_stack_proto_tp;
1697 	case BPF_FUNC_get_attach_cookie:
1698 		return &bpf_get_attach_cookie_proto_trace;
1699 	default:
1700 		return bpf_tracing_func_proto(func_id, prog);
1701 	}
1702 }
1703 
1704 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1705 				    const struct bpf_prog *prog,
1706 				    struct bpf_insn_access_aux *info)
1707 {
1708 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1709 		return false;
1710 	if (type != BPF_READ)
1711 		return false;
1712 	if (off % size != 0)
1713 		return false;
1714 
1715 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1716 	return true;
1717 }
1718 
1719 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1720 	.get_func_proto  = tp_prog_func_proto,
1721 	.is_valid_access = tp_prog_is_valid_access,
1722 };
1723 
1724 const struct bpf_prog_ops tracepoint_prog_ops = {
1725 };
1726 
1727 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1728 	   struct bpf_perf_event_value *, buf, u32, size)
1729 {
1730 	int err = -EINVAL;
1731 
1732 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1733 		goto clear;
1734 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1735 				    &buf->running);
1736 	if (unlikely(err))
1737 		goto clear;
1738 	return 0;
1739 clear:
1740 	memset(buf, 0, size);
1741 	return err;
1742 }
1743 
1744 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1745 	.func		= bpf_perf_prog_read_value,
1746 	.gpl_only	= true,
1747 	.ret_type	= RET_INTEGER,
1748 	.arg1_type	= ARG_PTR_TO_CTX,
1749 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1750 	.arg3_type	= ARG_CONST_SIZE,
1751 };
1752 
1753 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1754 	   void *, buf, u32, size, u64, flags)
1755 {
1756 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1757 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1758 	u32 to_copy;
1759 
1760 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1761 		return -EINVAL;
1762 
1763 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1764 		return -ENOENT;
1765 
1766 	if (unlikely(!br_stack))
1767 		return -ENOENT;
1768 
1769 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1770 		return br_stack->nr * br_entry_size;
1771 
1772 	if (!buf || (size % br_entry_size != 0))
1773 		return -EINVAL;
1774 
1775 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1776 	memcpy(buf, br_stack->entries, to_copy);
1777 
1778 	return to_copy;
1779 }
1780 
1781 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1782 	.func           = bpf_read_branch_records,
1783 	.gpl_only       = true,
1784 	.ret_type       = RET_INTEGER,
1785 	.arg1_type      = ARG_PTR_TO_CTX,
1786 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1787 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1788 	.arg4_type      = ARG_ANYTHING,
1789 };
1790 
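/*
 * Usage sketch (assumption): bpf_read_branch_records() supports a two-step
 * pattern that follows directly from the checks above: query the size with
 * BPF_F_GET_BRANCH_RECORDS_SIZE, then copy into a buffer whose size is a
 * multiple of sizeof(struct perf_branch_entry):
 *
 *	struct perf_branch_entry entries[16];
 *	int total, copied;
 *
 *	total  = bpf_read_branch_records(ctx, NULL, 0,
 *					 BPF_F_GET_BRANCH_RECORDS_SIZE);
 *	copied = bpf_read_branch_records(ctx, entries, sizeof(entries), 0);
 *
 * Both calls return a negative errno if the event did not sample branch
 * stacks (-ENOENT) or the arguments are malformed (-EINVAL).
 */
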
1791 static const struct bpf_func_proto *
1792 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1793 {
1794 	switch (func_id) {
1795 	case BPF_FUNC_perf_event_output:
1796 		return &bpf_perf_event_output_proto_tp;
1797 	case BPF_FUNC_get_stackid:
1798 		return &bpf_get_stackid_proto_pe;
1799 	case BPF_FUNC_get_stack:
1800 		return &bpf_get_stack_proto_pe;
1801 	case BPF_FUNC_perf_prog_read_value:
1802 		return &bpf_perf_prog_read_value_proto;
1803 	case BPF_FUNC_read_branch_records:
1804 		return &bpf_read_branch_records_proto;
1805 	case BPF_FUNC_get_attach_cookie:
1806 		return &bpf_get_attach_cookie_proto_pe;
1807 	default:
1808 		return bpf_tracing_func_proto(func_id, prog);
1809 	}
1810 }
1811 
1812 /*
1813  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1814  * to avoid potential recursive reuse issue when/if tracepoints are added
1815  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1816  *
1817  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1818  * in normal, irq, and nmi context.
1819  */
1820 struct bpf_raw_tp_regs {
1821 	struct pt_regs regs[3];
1822 };
1823 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1824 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
1825 static struct pt_regs *get_bpf_raw_tp_regs(void)
1826 {
1827 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1828 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1829 
1830 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1831 		this_cpu_dec(bpf_raw_tp_nest_level);
1832 		return ERR_PTR(-EBUSY);
1833 	}
1834 
1835 	return &tp_regs->regs[nest_level - 1];
1836 }
1837 
1838 static void put_bpf_raw_tp_regs(void)
1839 {
1840 	this_cpu_dec(bpf_raw_tp_nest_level);
1841 }
1842 
1843 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1844 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1845 {
1846 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1847 	int ret;
1848 
1849 	if (IS_ERR(regs))
1850 		return PTR_ERR(regs);
1851 
1852 	perf_fetch_caller_regs(regs);
1853 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1854 
1855 	put_bpf_raw_tp_regs();
1856 	return ret;
1857 }
1858 
1859 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1860 	.func		= bpf_perf_event_output_raw_tp,
1861 	.gpl_only	= true,
1862 	.ret_type	= RET_INTEGER,
1863 	.arg1_type	= ARG_PTR_TO_CTX,
1864 	.arg2_type	= ARG_CONST_MAP_PTR,
1865 	.arg3_type	= ARG_ANYTHING,
1866 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1867 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1868 };
1869 
1870 extern const struct bpf_func_proto bpf_skb_output_proto;
1871 extern const struct bpf_func_proto bpf_xdp_output_proto;
1872 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1873 
1874 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1875 	   struct bpf_map *, map, u64, flags)
1876 {
1877 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1878 	int ret;
1879 
1880 	if (IS_ERR(regs))
1881 		return PTR_ERR(regs);
1882 
1883 	perf_fetch_caller_regs(regs);
1884 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1885 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1886 			      flags, 0, 0);
1887 	put_bpf_raw_tp_regs();
1888 	return ret;
1889 }
1890 
1891 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1892 	.func		= bpf_get_stackid_raw_tp,
1893 	.gpl_only	= true,
1894 	.ret_type	= RET_INTEGER,
1895 	.arg1_type	= ARG_PTR_TO_CTX,
1896 	.arg2_type	= ARG_CONST_MAP_PTR,
1897 	.arg3_type	= ARG_ANYTHING,
1898 };
1899 
1900 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1901 	   void *, buf, u32, size, u64, flags)
1902 {
1903 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1904 	int ret;
1905 
1906 	if (IS_ERR(regs))
1907 		return PTR_ERR(regs);
1908 
1909 	perf_fetch_caller_regs(regs);
1910 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1911 			    (unsigned long) size, flags, 0);
1912 	put_bpf_raw_tp_regs();
1913 	return ret;
1914 }
1915 
1916 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1917 	.func		= bpf_get_stack_raw_tp,
1918 	.gpl_only	= true,
1919 	.ret_type	= RET_INTEGER,
1920 	.arg1_type	= ARG_PTR_TO_CTX,
1921 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1922 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1923 	.arg4_type	= ARG_ANYTHING,
1924 };
1925 
1926 static const struct bpf_func_proto *
1927 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1928 {
1929 	switch (func_id) {
1930 	case BPF_FUNC_perf_event_output:
1931 		return &bpf_perf_event_output_proto_raw_tp;
1932 	case BPF_FUNC_get_stackid:
1933 		return &bpf_get_stackid_proto_raw_tp;
1934 	case BPF_FUNC_get_stack:
1935 		return &bpf_get_stack_proto_raw_tp;
1936 	default:
1937 		return bpf_tracing_func_proto(func_id, prog);
1938 	}
1939 }
1940 
1941 const struct bpf_func_proto *
1942 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1943 {
1944 	const struct bpf_func_proto *fn;
1945 
1946 	switch (func_id) {
1947 #ifdef CONFIG_NET
1948 	case BPF_FUNC_skb_output:
1949 		return &bpf_skb_output_proto;
1950 	case BPF_FUNC_xdp_output:
1951 		return &bpf_xdp_output_proto;
1952 	case BPF_FUNC_skc_to_tcp6_sock:
1953 		return &bpf_skc_to_tcp6_sock_proto;
1954 	case BPF_FUNC_skc_to_tcp_sock:
1955 		return &bpf_skc_to_tcp_sock_proto;
1956 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1957 		return &bpf_skc_to_tcp_timewait_sock_proto;
1958 	case BPF_FUNC_skc_to_tcp_request_sock:
1959 		return &bpf_skc_to_tcp_request_sock_proto;
1960 	case BPF_FUNC_skc_to_udp6_sock:
1961 		return &bpf_skc_to_udp6_sock_proto;
1962 	case BPF_FUNC_skc_to_unix_sock:
1963 		return &bpf_skc_to_unix_sock_proto;
1964 	case BPF_FUNC_skc_to_mptcp_sock:
1965 		return &bpf_skc_to_mptcp_sock_proto;
1966 	case BPF_FUNC_sk_storage_get:
1967 		return &bpf_sk_storage_get_tracing_proto;
1968 	case BPF_FUNC_sk_storage_delete:
1969 		return &bpf_sk_storage_delete_tracing_proto;
1970 	case BPF_FUNC_sock_from_file:
1971 		return &bpf_sock_from_file_proto;
1972 	case BPF_FUNC_get_socket_cookie:
1973 		return &bpf_get_socket_ptr_cookie_proto;
1974 	case BPF_FUNC_xdp_get_buff_len:
1975 		return &bpf_xdp_get_buff_len_trace_proto;
1976 #endif
1977 	case BPF_FUNC_seq_printf:
1978 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1979 		       &bpf_seq_printf_proto :
1980 		       NULL;
1981 	case BPF_FUNC_seq_write:
1982 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1983 		       &bpf_seq_write_proto :
1984 		       NULL;
1985 	case BPF_FUNC_seq_printf_btf:
1986 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1987 		       &bpf_seq_printf_btf_proto :
1988 		       NULL;
1989 	case BPF_FUNC_d_path:
1990 		return &bpf_d_path_proto;
1991 	case BPF_FUNC_get_func_arg:
1992 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1993 	case BPF_FUNC_get_func_ret:
1994 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
1995 	case BPF_FUNC_get_func_arg_cnt:
1996 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
1997 	case BPF_FUNC_get_attach_cookie:
1998 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
1999 	default:
2000 		fn = raw_tp_prog_func_proto(func_id, prog);
2001 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2002 			fn = bpf_iter_get_func_proto(func_id, prog);
2003 		return fn;
2004 	}
2005 }
2006 
2007 static bool raw_tp_prog_is_valid_access(int off, int size,
2008 					enum bpf_access_type type,
2009 					const struct bpf_prog *prog,
2010 					struct bpf_insn_access_aux *info)
2011 {
2012 	return bpf_tracing_ctx_access(off, size, type);
2013 }
2014 
2015 static bool tracing_prog_is_valid_access(int off, int size,
2016 					 enum bpf_access_type type,
2017 					 const struct bpf_prog *prog,
2018 					 struct bpf_insn_access_aux *info)
2019 {
2020 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2021 }
2022 
2023 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2024 				     const union bpf_attr *kattr,
2025 				     union bpf_attr __user *uattr)
2026 {
2027 	return -ENOTSUPP;
2028 }
2029 
2030 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2031 	.get_func_proto  = raw_tp_prog_func_proto,
2032 	.is_valid_access = raw_tp_prog_is_valid_access,
2033 };
2034 
2035 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2036 #ifdef CONFIG_NET
2037 	.test_run = bpf_prog_test_run_raw_tp,
2038 #endif
2039 };
2040 
2041 const struct bpf_verifier_ops tracing_verifier_ops = {
2042 	.get_func_proto  = tracing_prog_func_proto,
2043 	.is_valid_access = tracing_prog_is_valid_access,
2044 };
2045 
2046 const struct bpf_prog_ops tracing_prog_ops = {
2047 	.test_run = bpf_prog_test_run_tracing,
2048 };
2049 
2050 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2051 						 enum bpf_access_type type,
2052 						 const struct bpf_prog *prog,
2053 						 struct bpf_insn_access_aux *info)
2054 {
2055 	if (off == 0) {
2056 		if (size != sizeof(u64) || type != BPF_READ)
2057 			return false;
2058 		info->reg_type = PTR_TO_TP_BUFFER;
2059 	}
2060 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2061 }
2062 
2063 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2064 	.get_func_proto  = raw_tp_prog_func_proto,
2065 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2066 };
2067 
2068 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2069 };
2070 
2071 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2072 				    const struct bpf_prog *prog,
2073 				    struct bpf_insn_access_aux *info)
2074 {
2075 	const int size_u64 = sizeof(u64);
2076 
2077 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2078 		return false;
2079 	if (type != BPF_READ)
2080 		return false;
2081 	if (off % size != 0) {
2082 		if (sizeof(unsigned long) != 4)
2083 			return false;
2084 		if (size != 8)
2085 			return false;
2086 		if (off % size != 4)
2087 			return false;
2088 	}
2089 
2090 	switch (off) {
2091 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2092 		bpf_ctx_record_field_size(info, size_u64);
2093 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2094 			return false;
2095 		break;
2096 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2097 		bpf_ctx_record_field_size(info, size_u64);
2098 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2099 			return false;
2100 		break;
2101 	default:
2102 		if (size != sizeof(long))
2103 			return false;
2104 	}
2105 
2106 	return true;
2107 }
2108 
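/*
 * Illustrative sketch (assumption): a perf_event program sees struct
 * bpf_perf_event_data, and pe_prog_is_valid_access() above allows read-only
 * access: full or narrow loads of sample_period and addr, plus
 * sizeof(long)-sized loads of the saved registers.  PT_REGS_IP() below is a
 * libbpf convenience macro, not something defined here:
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		__u64 period = ctx->sample_period;	// full 8-byte read
 *		__u64 addr   = ctx->addr;		// full 8-byte read
 *		long  ip     = PT_REGS_IP(&ctx->regs);	// sizeof(long) reg read
 *		return period && addr && ip;
 *	}
 *
 * Any write to ctx is rejected (type != BPF_READ above).
 */
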
2109 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2110 				      const struct bpf_insn *si,
2111 				      struct bpf_insn *insn_buf,
2112 				      struct bpf_prog *prog, u32 *target_size)
2113 {
2114 	struct bpf_insn *insn = insn_buf;
2115 
2116 	switch (si->off) {
2117 	case offsetof(struct bpf_perf_event_data, sample_period):
2118 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2119 						       data), si->dst_reg, si->src_reg,
2120 				      offsetof(struct bpf_perf_event_data_kern, data));
2121 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2122 				      bpf_target_off(struct perf_sample_data, period, 8,
2123 						     target_size));
2124 		break;
2125 	case offsetof(struct bpf_perf_event_data, addr):
2126 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2127 						       data), si->dst_reg, si->src_reg,
2128 				      offsetof(struct bpf_perf_event_data_kern, data));
2129 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2130 				      bpf_target_off(struct perf_sample_data, addr, 8,
2131 						     target_size));
2132 		break;
2133 	default:
2134 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2135 						       regs), si->dst_reg, si->src_reg,
2136 				      offsetof(struct bpf_perf_event_data_kern, regs));
2137 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2138 				      si->off);
2139 		break;
2140 	}
2141 
2142 	return insn - insn_buf;
2143 }
2144 
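/*
 * For reference (derived from the code above): a program-side load of
 * ctx->sample_period is rewritten by pe_prog_convert_ctx_access() into two
 * kernel-side loads, roughly:
 *
 *	dst = *(ptr *)(ctx + offsetof(struct bpf_perf_event_data_kern, data));
 *	dst = *(u64 *)(dst + offsetof(struct perf_sample_data, period));
 *
 * Register accesses fall through to the default case and are redirected
 * through ctx->regs with a sizeof(long) load.
 */
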
2145 const struct bpf_verifier_ops perf_event_verifier_ops = {
2146 	.get_func_proto		= pe_prog_func_proto,
2147 	.is_valid_access	= pe_prog_is_valid_access,
2148 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2149 };
2150 
2151 const struct bpf_prog_ops perf_event_prog_ops = {
2152 };
2153 
2154 static DEFINE_MUTEX(bpf_event_mutex);
2155 
2156 #define BPF_TRACE_MAX_PROGS 64
2157 
2158 int perf_event_attach_bpf_prog(struct perf_event *event,
2159 			       struct bpf_prog *prog,
2160 			       u64 bpf_cookie)
2161 {
2162 	struct bpf_prog_array *old_array;
2163 	struct bpf_prog_array *new_array;
2164 	int ret = -EEXIST;
2165 
2166 	/*
2167 	 * Kprobe override only works if the probe is on the function entry,
2168 	 * and only if the target function is on the error-injection opt-in list.
2169 	 */
2170 	if (prog->kprobe_override &&
2171 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2172 	     !trace_kprobe_error_injectable(event->tp_event)))
2173 		return -EINVAL;
2174 
2175 	mutex_lock(&bpf_event_mutex);
2176 
2177 	if (event->prog)
2178 		goto unlock;
2179 
2180 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2181 	if (old_array &&
2182 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2183 		ret = -E2BIG;
2184 		goto unlock;
2185 	}
2186 
2187 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2188 	if (ret < 0)
2189 		goto unlock;
2190 
2191 	/* set the new array to event->tp_event and set event->prog */
2192 	event->prog = prog;
2193 	event->bpf_cookie = bpf_cookie;
2194 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2195 	bpf_prog_array_free_sleepable(old_array);
2196 
2197 unlock:
2198 	mutex_unlock(&bpf_event_mutex);
2199 	return ret;
2200 }
2201 
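/*
 * Userspace sketch (assumption): this attach path is normally reached via the
 * PERF_EVENT_IOC_SET_BPF ioctl on a perf event file descriptor; the newer
 * BPF_LINK_CREATE path for perf events supplies the bpf_cookie argument:
 *
 *	int event_fd = syscall(__NR_perf_event_open, &pe_attr, -1, 0, -1, 0);
 *
 *	if (ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
 *		// error handling
 *
 * Attaching a second program to the same perf event fd fails (-EEXIST), and
 * one tp_event accepts at most BPF_TRACE_MAX_PROGS programs (-E2BIG).
 */
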
2202 void perf_event_detach_bpf_prog(struct perf_event *event)
2203 {
2204 	struct bpf_prog_array *old_array;
2205 	struct bpf_prog_array *new_array;
2206 	int ret;
2207 
2208 	mutex_lock(&bpf_event_mutex);
2209 
2210 	if (!event->prog)
2211 		goto unlock;
2212 
2213 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2214 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2215 	if (ret == -ENOENT)
2216 		goto unlock;
2217 	if (ret < 0) {
2218 		bpf_prog_array_delete_safe(old_array, event->prog);
2219 	} else {
2220 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2221 		bpf_prog_array_free_sleepable(old_array);
2222 	}
2223 
2224 	bpf_prog_put(event->prog);
2225 	event->prog = NULL;
2226 
2227 unlock:
2228 	mutex_unlock(&bpf_event_mutex);
2229 }
2230 
2231 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2232 {
2233 	struct perf_event_query_bpf __user *uquery = info;
2234 	struct perf_event_query_bpf query = {};
2235 	struct bpf_prog_array *progs;
2236 	u32 *ids, prog_cnt, ids_len;
2237 	int ret;
2238 
2239 	if (!perfmon_capable())
2240 		return -EPERM;
2241 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2242 		return -EINVAL;
2243 	if (copy_from_user(&query, uquery, sizeof(query)))
2244 		return -EFAULT;
2245 
2246 	ids_len = query.ids_len;
2247 	if (ids_len > BPF_TRACE_MAX_PROGS)
2248 		return -E2BIG;
2249 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2250 	if (!ids)
2251 		return -ENOMEM;
2252 	/*
2253 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2254 	 * is required when the user only wants to check for uquery->prog_cnt.
2255 	 * There is no need to check for it since the case is handled
2256 	 * gracefully in bpf_prog_array_copy_info.
2257 	 */
2258 
2259 	mutex_lock(&bpf_event_mutex);
2260 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2261 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2262 	mutex_unlock(&bpf_event_mutex);
2263 
2264 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2265 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2266 		ret = -EFAULT;
2267 
2268 	kfree(ids);
2269 	return ret;
2270 }
2271 
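/*
 * Userspace sketch (assumption): the query side is reached through the
 * PERF_EVENT_IOC_QUERY_BPF ioctl with a struct perf_event_query_bpf whose
 * ids[] array has room for ids_len entries (at most BPF_TRACE_MAX_PROGS):
 *
 *	struct {
 *		struct perf_event_query_bpf hdr;
 *		__u32 ids[64];
 *	} q = { .hdr = { .ids_len = 64 } };
 *
 *	if (!ioctl(event_fd, PERF_EVENT_IOC_QUERY_BPF, &q.hdr))
 *		// q.hdr.prog_cnt programs attached, ids filled into q.ids[]
 *
 * CAP_PERFMON (perfmon_capable()) is required, and -E2BIG is returned when
 * ids_len exceeds BPF_TRACE_MAX_PROGS.
 */
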
2272 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2273 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2274 
2275 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2276 {
2277 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2278 
2279 	for (; btp < __stop__bpf_raw_tp; btp++) {
2280 		if (!strcmp(btp->tp->name, name))
2281 			return btp;
2282 	}
2283 
2284 	return bpf_get_raw_tracepoint_module(name);
2285 }
2286 
2287 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2288 {
2289 	struct module *mod;
2290 
2291 	preempt_disable();
2292 	mod = __module_address((unsigned long)btp);
2293 	module_put(mod);
2294 	preempt_enable();
2295 }
2296 
2297 static __always_inline
2298 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2299 {
2300 	cant_sleep();
2301 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2302 		bpf_prog_inc_misses_counter(prog);
2303 		goto out;
2304 	}
2305 	rcu_read_lock();
2306 	(void) bpf_prog_run(prog, args);
2307 	rcu_read_unlock();
2308 out:
2309 	this_cpu_dec(*(prog->active));
2310 }
2311 
2312 #define UNPACK(...)			__VA_ARGS__
2313 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2314 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2315 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2316 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2317 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2318 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2319 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2320 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2321 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2322 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2323 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2324 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2325 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2326 
2327 #define SARG(X)		u64 arg##X
2328 #define COPY(X)		args[X] = arg##X
2329 
2330 #define __DL_COM	(,)
2331 #define __DL_SEM	(;)
2332 
2333 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2334 
2335 #define BPF_TRACE_DEFN_x(x)						\
2336 	void bpf_trace_run##x(struct bpf_prog *prog,			\
2337 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2338 	{								\
2339 		u64 args[x];						\
2340 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2341 		__bpf_trace_run(prog, args);				\
2342 	}								\
2343 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2344 BPF_TRACE_DEFN_x(1);
2345 BPF_TRACE_DEFN_x(2);
2346 BPF_TRACE_DEFN_x(3);
2347 BPF_TRACE_DEFN_x(4);
2348 BPF_TRACE_DEFN_x(5);
2349 BPF_TRACE_DEFN_x(6);
2350 BPF_TRACE_DEFN_x(7);
2351 BPF_TRACE_DEFN_x(8);
2352 BPF_TRACE_DEFN_x(9);
2353 BPF_TRACE_DEFN_x(10);
2354 BPF_TRACE_DEFN_x(11);
2355 BPF_TRACE_DEFN_x(12);
2356 
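/*
 * For reference, BPF_TRACE_DEFN_x(2) above expands (modulo whitespace) to:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 *
 * These are the functions that the __bpf_trace_##call stubs generated by
 * include/trace/bpf_probe.h call with the tracepoint arguments cast to u64.
 */
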
2357 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2358 {
2359 	struct tracepoint *tp = btp->tp;
2360 
2361 	/*
2362 	 * check that program doesn't access arguments beyond what's
2363 	 * check that the program doesn't access arguments beyond what's
2364 	 */
2365 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2366 		return -EINVAL;
2367 
2368 	if (prog->aux->max_tp_access > btp->writable_size)
2369 		return -EINVAL;
2370 
2371 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2372 						   prog);
2373 }
2374 
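/*
 * Illustrative sketch (assumption): the max_ctx_offset check above means a
 * raw_tp program may only touch as many u64 arguments as the tracepoint
 * provides.  For a hypothetical tracepoint with two arguments:
 *
 *	SEC("raw_tp/some_tracepoint")
 *	int handle(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		__u64 a0 = ctx->args[0];	// ok
 *		__u64 a1 = ctx->args[1];	// ok
 *		// reading ctx->args[2] would push max_ctx_offset past
 *		// num_args * sizeof(u64) and the attach would fail with -EINVAL
 *		return a0 == a1;
 *	}
 */
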
2375 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2376 {
2377 	return __bpf_probe_register(btp, prog);
2378 }
2379 
2380 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2381 {
2382 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2383 }
2384 
2385 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2386 			    u32 *fd_type, const char **buf,
2387 			    u64 *probe_offset, u64 *probe_addr)
2388 {
2389 	bool is_tracepoint, is_syscall_tp;
2390 	struct bpf_prog *prog;
2391 	int flags, err = 0;
2392 
2393 	prog = event->prog;
2394 	if (!prog)
2395 		return -ENOENT;
2396 
2397 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2398 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2399 		return -EOPNOTSUPP;
2400 
2401 	*prog_id = prog->aux->id;
2402 	flags = event->tp_event->flags;
2403 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2404 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2405 
2406 	if (is_tracepoint || is_syscall_tp) {
2407 		*buf = is_tracepoint ? event->tp_event->tp->name
2408 				     : event->tp_event->name;
2409 		/* We allow NULL pointer for tracepoint */
2410 		/* We allow a NULL pointer for tracepoints */
2411 			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2412 		if (probe_offset)
2413 			*probe_offset = 0x0;
2414 		if (probe_addr)
2415 			*probe_addr = 0x0;
2416 	} else {
2417 		/* kprobe/uprobe */
2418 		err = -EOPNOTSUPP;
2419 #ifdef CONFIG_KPROBE_EVENTS
2420 		if (flags & TRACE_EVENT_FL_KPROBE)
2421 			err = bpf_get_kprobe_info(event, fd_type, buf,
2422 						  probe_offset, probe_addr,
2423 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2424 #endif
2425 #ifdef CONFIG_UPROBE_EVENTS
2426 		if (flags & TRACE_EVENT_FL_UPROBE)
2427 			err = bpf_get_uprobe_info(event, fd_type, buf,
2428 						  probe_offset, probe_addr,
2429 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2430 #endif
2431 	}
2432 
2433 	return err;
2434 }
2435 
2436 static int __init send_signal_irq_work_init(void)
2437 {
2438 	int cpu;
2439 	struct send_signal_irq_work *work;
2440 
2441 	for_each_possible_cpu(cpu) {
2442 		work = per_cpu_ptr(&send_signal_work, cpu);
2443 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2444 	}
2445 	return 0;
2446 }
2447 
2448 subsys_initcall(send_signal_irq_work_init);
2449 
2450 #ifdef CONFIG_MODULES
2451 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2452 			    void *module)
2453 {
2454 	struct bpf_trace_module *btm, *tmp;
2455 	struct module *mod = module;
2456 	int ret = 0;
2457 
2458 	if (mod->num_bpf_raw_events == 0 ||
2459 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2460 		goto out;
2461 
2462 	mutex_lock(&bpf_module_mutex);
2463 
2464 	switch (op) {
2465 	case MODULE_STATE_COMING:
2466 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2467 		if (btm) {
2468 			btm->module = module;
2469 			list_add(&btm->list, &bpf_trace_modules);
2470 		} else {
2471 			ret = -ENOMEM;
2472 		}
2473 		break;
2474 	case MODULE_STATE_GOING:
2475 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2476 			if (btm->module == module) {
2477 				list_del(&btm->list);
2478 				kfree(btm);
2479 				break;
2480 			}
2481 		}
2482 		break;
2483 	}
2484 
2485 	mutex_unlock(&bpf_module_mutex);
2486 
2487 out:
2488 	return notifier_from_errno(ret);
2489 }
2490 
2491 static struct notifier_block bpf_module_nb = {
2492 	.notifier_call = bpf_event_notify,
2493 };
2494 
2495 static int __init bpf_event_init(void)
2496 {
2497 	register_module_notifier(&bpf_module_nb);
2498 	return 0;
2499 }
2500 
2501 fs_initcall(bpf_event_init);
2502 #endif /* CONFIG_MODULES */
2503 
2504 #ifdef CONFIG_FPROBE
2505 struct bpf_kprobe_multi_link {
2506 	struct bpf_link link;
2507 	struct fprobe fp;
2508 	unsigned long *addrs;
2509 	u64 *cookies;
2510 	u32 cnt;
2511 	u32 mods_cnt;
2512 	struct module **mods;
2513 	u32 flags;
2514 };
2515 
2516 struct bpf_kprobe_multi_run_ctx {
2517 	struct bpf_run_ctx run_ctx;
2518 	struct bpf_kprobe_multi_link *link;
2519 	unsigned long entry_ip;
2520 };
2521 
2522 struct user_syms {
2523 	const char **syms;
2524 	char *buf;
2525 };
2526 
2527 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2528 {
2529 	unsigned long __user usymbol;
2530 	const char **syms = NULL;
2531 	char *buf = NULL, *p;
2532 	int err = -ENOMEM;
2533 	unsigned int i;
2534 
2535 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2536 	if (!syms)
2537 		goto error;
2538 
2539 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2540 	if (!buf)
2541 		goto error;
2542 
2543 	for (p = buf, i = 0; i < cnt; i++) {
2544 		if (__get_user(usymbol, usyms + i)) {
2545 			err = -EFAULT;
2546 			goto error;
2547 		}
2548 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2549 		if (err == KSYM_NAME_LEN)
2550 			err = -E2BIG;
2551 		if (err < 0)
2552 			goto error;
2553 		syms[i] = p;
2554 		p += err + 1;
2555 	}
2556 
2557 	us->syms = syms;
2558 	us->buf = buf;
2559 	return 0;
2560 
2561 error:
2562 	if (err) {
2563 		kvfree(syms);
2564 		kvfree(buf);
2565 	}
2566 	return err;
2567 }
2568 
2569 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2570 {
2571 	u32 i;
2572 
2573 	for (i = 0; i < cnt; i++)
2574 		module_put(mods[i]);
2575 }
2576 
2577 static void free_user_syms(struct user_syms *us)
2578 {
2579 	kvfree(us->syms);
2580 	kvfree(us->buf);
2581 }
2582 
2583 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2584 {
2585 	struct bpf_kprobe_multi_link *kmulti_link;
2586 
2587 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2588 	unregister_fprobe(&kmulti_link->fp);
2589 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2590 }
2591 
2592 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2593 {
2594 	struct bpf_kprobe_multi_link *kmulti_link;
2595 
2596 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2597 	kvfree(kmulti_link->addrs);
2598 	kvfree(kmulti_link->cookies);
2599 	kfree(kmulti_link->mods);
2600 	kfree(kmulti_link);
2601 }
2602 
2603 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2604 						struct bpf_link_info *info)
2605 {
2606 	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2607 	struct bpf_kprobe_multi_link *kmulti_link;
2608 	u32 ucount = info->kprobe_multi.count;
2609 	int err = 0, i;
2610 
2611 	if (!uaddrs ^ !ucount)
2612 		return -EINVAL;
2613 
2614 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2615 	info->kprobe_multi.count = kmulti_link->cnt;
2616 	info->kprobe_multi.flags = kmulti_link->flags;
2617 
2618 	if (!uaddrs)
2619 		return 0;
2620 	if (ucount < kmulti_link->cnt)
2621 		err = -ENOSPC;
2622 	else
2623 		ucount = kmulti_link->cnt;
2624 
2625 	if (kallsyms_show_value(current_cred())) {
2626 		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2627 			return -EFAULT;
2628 	} else {
2629 		for (i = 0; i < ucount; i++) {
2630 			if (put_user(0, uaddrs + i))
2631 				return -EFAULT;
2632 		}
2633 	}
2634 	return err;
2635 }
2636 
2637 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2638 	.release = bpf_kprobe_multi_link_release,
2639 	.dealloc = bpf_kprobe_multi_link_dealloc,
2640 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2641 };
2642 
2643 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2644 {
2645 	const struct bpf_kprobe_multi_link *link = priv;
2646 	unsigned long *addr_a = a, *addr_b = b;
2647 	u64 *cookie_a, *cookie_b;
2648 
2649 	cookie_a = link->cookies + (addr_a - link->addrs);
2650 	cookie_b = link->cookies + (addr_b - link->addrs);
2651 
2652 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2653 	swap(*addr_a, *addr_b);
2654 	swap(*cookie_a, *cookie_b);
2655 }
2656 
2657 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2658 {
2659 	const unsigned long *addr_a = a, *addr_b = b;
2660 
2661 	if (*addr_a == *addr_b)
2662 		return 0;
2663 	return *addr_a < *addr_b ? -1 : 1;
2664 }
2665 
2666 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2667 {
2668 	return bpf_kprobe_multi_addrs_cmp(a, b);
2669 }
2670 
2671 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2672 {
2673 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2674 	struct bpf_kprobe_multi_link *link;
2675 	u64 *cookie, entry_ip;
2676 	unsigned long *addr;
2677 
2678 	if (WARN_ON_ONCE(!ctx))
2679 		return 0;
2680 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2681 	link = run_ctx->link;
2682 	if (!link->cookies)
2683 		return 0;
2684 	entry_ip = run_ctx->entry_ip;
2685 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2686 		       bpf_kprobe_multi_addrs_cmp);
2687 	if (!addr)
2688 		return 0;
2689 	cookie = link->cookies + (addr - link->addrs);
2690 	return *cookie;
2691 }
2692 
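/*
 * Usage sketch (assumption): from a kprobe.multi program the per-symbol
 * cookie stored alongside each address is retrieved with the
 * bpf_get_attach_cookie() helper, which lands in bpf_kprobe_multi_cookie()
 * via the run_ctx set up in kprobe_multi_link_prog_run() below:
 *
 *	SEC("kprobe.multi/some_pattern")	// hypothetical section name
 *	int handler(struct pt_regs *ctx)
 *	{
 *		__u64 cookie = bpf_get_attach_cookie(ctx);
 *		__u64 ip = bpf_get_func_ip(ctx);	// entry_ip from run_ctx
 *		return cookie && ip;
 *	}
 */
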
2693 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2694 {
2695 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2696 
2697 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2698 	return run_ctx->entry_ip;
2699 }
2700 
2701 static int
2702 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2703 			   unsigned long entry_ip, struct pt_regs *regs)
2704 {
2705 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2706 		.link = link,
2707 		.entry_ip = entry_ip,
2708 	};
2709 	struct bpf_run_ctx *old_run_ctx;
2710 	int err;
2711 
2712 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2713 		err = 0;
2714 		goto out;
2715 	}
2716 
2717 	migrate_disable();
2718 	rcu_read_lock();
2719 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2720 	err = bpf_prog_run(link->link.prog, regs);
2721 	bpf_reset_run_ctx(old_run_ctx);
2722 	rcu_read_unlock();
2723 	migrate_enable();
2724 
2725  out:
2726 	__this_cpu_dec(bpf_prog_active);
2727 	return err;
2728 }
2729 
2730 static int
2731 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2732 			  unsigned long ret_ip, struct pt_regs *regs,
2733 			  void *data)
2734 {
2735 	struct bpf_kprobe_multi_link *link;
2736 
2737 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2738 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2739 	return 0;
2740 }
2741 
2742 static void
2743 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2744 			       unsigned long ret_ip, struct pt_regs *regs,
2745 			       void *data)
2746 {
2747 	struct bpf_kprobe_multi_link *link;
2748 
2749 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2750 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2751 }
2752 
2753 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2754 {
2755 	const char **str_a = (const char **) a;
2756 	const char **str_b = (const char **) b;
2757 
2758 	return strcmp(*str_a, *str_b);
2759 }
2760 
2761 struct multi_symbols_sort {
2762 	const char **funcs;
2763 	u64 *cookies;
2764 };
2765 
2766 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2767 {
2768 	const struct multi_symbols_sort *data = priv;
2769 	const char **name_a = a, **name_b = b;
2770 
2771 	swap(*name_a, *name_b);
2772 
2773 	/* If defined, swap also related cookies. */
2774 	if (data->cookies) {
2775 		u64 *cookie_a, *cookie_b;
2776 
2777 		cookie_a = data->cookies + (name_a - data->funcs);
2778 		cookie_b = data->cookies + (name_b - data->funcs);
2779 		swap(*cookie_a, *cookie_b);
2780 	}
2781 }
2782 
2783 struct modules_array {
2784 	struct module **mods;
2785 	int mods_cnt;
2786 	int mods_cap;
2787 };
2788 
2789 static int add_module(struct modules_array *arr, struct module *mod)
2790 {
2791 	struct module **mods;
2792 
2793 	if (arr->mods_cnt == arr->mods_cap) {
2794 		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2795 		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2796 		if (!mods)
2797 			return -ENOMEM;
2798 		arr->mods = mods;
2799 	}
2800 
2801 	arr->mods[arr->mods_cnt] = mod;
2802 	arr->mods_cnt++;
2803 	return 0;
2804 }
2805 
2806 static bool has_module(struct modules_array *arr, struct module *mod)
2807 {
2808 	int i;
2809 
2810 	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2811 		if (arr->mods[i] == mod)
2812 			return true;
2813 	}
2814 	return false;
2815 }
2816 
2817 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2818 {
2819 	struct modules_array arr = {};
2820 	u32 i, err = 0;
2821 
2822 	for (i = 0; i < addrs_cnt; i++) {
2823 		struct module *mod;
2824 
2825 		preempt_disable();
2826 		mod = __module_address(addrs[i]);
2827 		/* Either no module or we it's already stored  */
2828 		/* Either no module or it's already stored */
2829 			preempt_enable();
2830 			continue;
2831 		}
2832 		if (!try_module_get(mod))
2833 			err = -EINVAL;
2834 		preempt_enable();
2835 		if (err)
2836 			break;
2837 		err = add_module(&arr, mod);
2838 		if (err) {
2839 			module_put(mod);
2840 			break;
2841 		}
2842 	}
2843 
2844 	/* We return either err < 0 in case of error, ... */
2845 	if (err) {
2846 		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2847 		kfree(arr.mods);
2848 		return err;
2849 	}
2850 
2851 	/* or number of modules found if everything is ok. */
2852 	*mods = arr.mods;
2853 	return arr.mods_cnt;
2854 }
2855 
2856 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2857 {
2858 	u32 i;
2859 
2860 	for (i = 0; i < cnt; i++) {
2861 		if (!within_error_injection_list(addrs[i]))
2862 			return -EINVAL;
2863 	}
2864 	return 0;
2865 }
2866 
2867 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2868 {
2869 	struct bpf_kprobe_multi_link *link = NULL;
2870 	struct bpf_link_primer link_primer;
2871 	void __user *ucookies;
2872 	unsigned long *addrs;
2873 	u32 flags, cnt, size;
2874 	void __user *uaddrs;
2875 	u64 *cookies = NULL;
2876 	void __user *usyms;
2877 	int err;
2878 
2879 	/* no support for 32bit archs yet */
2880 	if (sizeof(u64) != sizeof(void *))
2881 		return -EOPNOTSUPP;
2882 
2883 	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2884 		return -EINVAL;
2885 
2886 	flags = attr->link_create.kprobe_multi.flags;
2887 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2888 		return -EINVAL;
2889 
2890 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2891 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2892 	if (!!uaddrs == !!usyms)
2893 		return -EINVAL;
2894 
2895 	cnt = attr->link_create.kprobe_multi.cnt;
2896 	if (!cnt)
2897 		return -EINVAL;
2898 
2899 	size = cnt * sizeof(*addrs);
2900 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2901 	if (!addrs)
2902 		return -ENOMEM;
2903 
2904 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2905 	if (ucookies) {
2906 		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
2907 		if (!cookies) {
2908 			err = -ENOMEM;
2909 			goto error;
2910 		}
2911 		if (copy_from_user(cookies, ucookies, size)) {
2912 			err = -EFAULT;
2913 			goto error;
2914 		}
2915 	}
2916 
2917 	if (uaddrs) {
2918 		if (copy_from_user(addrs, uaddrs, size)) {
2919 			err = -EFAULT;
2920 			goto error;
2921 		}
2922 	} else {
2923 		struct multi_symbols_sort data = {
2924 			.cookies = cookies,
2925 		};
2926 		struct user_syms us;
2927 
2928 		err = copy_user_syms(&us, usyms, cnt);
2929 		if (err)
2930 			goto error;
2931 
2932 		if (cookies)
2933 			data.funcs = us.syms;
2934 
2935 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2936 		       symbols_swap_r, &data);
2937 
2938 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2939 		free_user_syms(&us);
2940 		if (err)
2941 			goto error;
2942 	}
2943 
2944 	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2945 		err = -EINVAL;
2946 		goto error;
2947 	}
2948 
2949 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2950 	if (!link) {
2951 		err = -ENOMEM;
2952 		goto error;
2953 	}
2954 
2955 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2956 		      &bpf_kprobe_multi_link_lops, prog);
2957 
2958 	err = bpf_link_prime(&link->link, &link_primer);
2959 	if (err)
2960 		goto error;
2961 
2962 	if (flags & BPF_F_KPROBE_MULTI_RETURN)
2963 		link->fp.exit_handler = kprobe_multi_link_exit_handler;
2964 	else
2965 		link->fp.entry_handler = kprobe_multi_link_handler;
2966 
2967 	link->addrs = addrs;
2968 	link->cookies = cookies;
2969 	link->cnt = cnt;
2970 	link->flags = flags;
2971 
2972 	if (cookies) {
2973 		/*
2974 		 * Sorting addresses will trigger sorting cookies as well
2975 		 * (check bpf_kprobe_multi_cookie_swap). This way we can
2976 		 * find cookie based on the address in bpf_get_attach_cookie
2977 		 * helper.
2978 		 */
2979 		sort_r(addrs, cnt, sizeof(*addrs),
2980 		       bpf_kprobe_multi_cookie_cmp,
2981 		       bpf_kprobe_multi_cookie_swap,
2982 		       link);
2983 	}
2984 
2985 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
2986 	if (err < 0) {
2987 		bpf_link_cleanup(&link_primer);
2988 		return err;
2989 	}
2990 	link->mods_cnt = err;
2991 
2992 	err = register_fprobe_ips(&link->fp, addrs, cnt);
2993 	if (err) {
2994 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
2995 		bpf_link_cleanup(&link_primer);
2996 		return err;
2997 	}
2998 
2999 	return bpf_link_settle(&link_primer);
3000 
3001 error:
3002 	kfree(link);
3003 	kvfree(addrs);
3004 	kvfree(cookies);
3005 	return err;
3006 }
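
/*
 * Userspace sketch (assumption): the attach above is driven by BPF_LINK_CREATE
 * with attach_type BPF_TRACE_KPROBE_MULTI, filling exactly one of
 * link_create.kprobe_multi.syms / .addrs plus an optional cookies array of
 * the same length:
 *
 *	const char *syms[] = { "ksys_read", "ksys_write" };
 *	__u64 cookies[] = { 1, 2 };
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
 *	attr.link_create.kprobe_multi.syms = (__u64)(uintptr_t)syms;
 *	attr.link_create.kprobe_multi.cookies = (__u64)(uintptr_t)cookies;
 *	attr.link_create.kprobe_multi.cnt = 2;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * Passing BPF_F_KPROBE_MULTI_RETURN in .flags selects the exit handler
 * (return probes) instead of the entry handler.
 */
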
3007 #else /* !CONFIG_FPROBE */
3008 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3009 {
3010 	return -EOPNOTSUPP;
3011 }
3012 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3013 {
3014 	return 0;
3015 }
3016 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3017 {
3018 	return 0;
3019 }
3020 #endif
3021 
3022 #ifdef CONFIG_UPROBES
3023 struct bpf_uprobe_multi_link;
3024 
3025 struct bpf_uprobe {
3026 	struct bpf_uprobe_multi_link *link;
3027 	loff_t offset;
3028 	u64 cookie;
3029 	struct uprobe_consumer consumer;
3030 };
3031 
3032 struct bpf_uprobe_multi_link {
3033 	struct path path;
3034 	struct bpf_link link;
3035 	u32 cnt;
3036 	struct bpf_uprobe *uprobes;
3037 	struct task_struct *task;
3038 };
3039 
3040 struct bpf_uprobe_multi_run_ctx {
3041 	struct bpf_run_ctx run_ctx;
3042 	unsigned long entry_ip;
3043 	struct bpf_uprobe *uprobe;
3044 };
3045 
3046 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3047 				  u32 cnt)
3048 {
3049 	u32 i;
3050 
3051 	for (i = 0; i < cnt; i++) {
3052 		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3053 				  &uprobes[i].consumer);
3054 	}
3055 }
3056 
3057 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3058 {
3059 	struct bpf_uprobe_multi_link *umulti_link;
3060 
3061 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3062 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3063 }
3064 
3065 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3066 {
3067 	struct bpf_uprobe_multi_link *umulti_link;
3068 
3069 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3070 	if (umulti_link->task)
3071 		put_task_struct(umulti_link->task);
3072 	path_put(&umulti_link->path);
3073 	kvfree(umulti_link->uprobes);
3074 	kfree(umulti_link);
3075 }
3076 
3077 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3078 	.release = bpf_uprobe_multi_link_release,
3079 	.dealloc = bpf_uprobe_multi_link_dealloc,
3080 };
3081 
3082 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3083 			   unsigned long entry_ip,
3084 			   struct pt_regs *regs)
3085 {
3086 	struct bpf_uprobe_multi_link *link = uprobe->link;
3087 	struct bpf_uprobe_multi_run_ctx run_ctx = {
3088 		.entry_ip = entry_ip,
3089 		.uprobe = uprobe,
3090 	};
3091 	struct bpf_prog *prog = link->link.prog;
3092 	bool sleepable = prog->aux->sleepable;
3093 	struct bpf_run_ctx *old_run_ctx;
3094 	int err = 0;
3095 
3096 	if (link->task && current != link->task)
3097 		return 0;
3098 
3099 	if (sleepable)
3100 		rcu_read_lock_trace();
3101 	else
3102 		rcu_read_lock();
3103 
3104 	migrate_disable();
3105 
3106 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3107 	err = bpf_prog_run(link->link.prog, regs);
3108 	bpf_reset_run_ctx(old_run_ctx);
3109 
3110 	migrate_enable();
3111 
3112 	if (sleepable)
3113 		rcu_read_unlock_trace();
3114 	else
3115 		rcu_read_unlock();
3116 	return err;
3117 }
3118 
3119 static bool
3120 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3121 			 struct mm_struct *mm)
3122 {
3123 	struct bpf_uprobe *uprobe;
3124 
3125 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3126 	return uprobe->link->task->mm == mm;
3127 }
3128 
3129 static int
3130 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3131 {
3132 	struct bpf_uprobe *uprobe;
3133 
3134 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3135 	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3136 }
3137 
3138 static int
3139 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3140 {
3141 	struct bpf_uprobe *uprobe;
3142 
3143 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3144 	return uprobe_prog_run(uprobe, func, regs);
3145 }
3146 
3147 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3148 {
3149 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3150 
3151 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3152 	return run_ctx->entry_ip;
3153 }
3154 
3155 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3156 {
3157 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3158 
3159 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3160 	return run_ctx->uprobe->cookie;
3161 }
3162 
3163 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3164 {
3165 	struct bpf_uprobe_multi_link *link = NULL;
3166 	unsigned long __user *uref_ctr_offsets;
3167 	unsigned long *ref_ctr_offsets = NULL;
3168 	struct bpf_link_primer link_primer;
3169 	struct bpf_uprobe *uprobes = NULL;
3170 	struct task_struct *task = NULL;
3171 	unsigned long __user *uoffsets;
3172 	u64 __user *ucookies;
3173 	void __user *upath;
3174 	u32 flags, cnt, i;
3175 	struct path path;
3176 	char *name;
3177 	pid_t pid;
3178 	int err;
3179 
3180 	/* no support for 32bit archs yet */
3181 	if (sizeof(u64) != sizeof(void *))
3182 		return -EOPNOTSUPP;
3183 
3184 	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3185 		return -EINVAL;
3186 
3187 	flags = attr->link_create.uprobe_multi.flags;
3188 	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3189 		return -EINVAL;
3190 
3191 	/*
3192 	 * path, offsets and cnt are mandatory,
3193 	 * ref_ctr_offsets and cookies are optional
3194 	 */
3195 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3196 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3197 	cnt = attr->link_create.uprobe_multi.cnt;
3198 
3199 	if (!upath || !uoffsets || !cnt)
3200 		return -EINVAL;
3201 
3202 	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3203 	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3204 
3205 	name = strndup_user(upath, PATH_MAX);
3206 	if (IS_ERR(name)) {
3207 		err = PTR_ERR(name);
3208 		return err;
3209 	}
3210 
3211 	err = kern_path(name, LOOKUP_FOLLOW, &path);
3212 	kfree(name);
3213 	if (err)
3214 		return err;
3215 
3216 	if (!d_is_reg(path.dentry)) {
3217 		err = -EBADF;
3218 		goto error_path_put;
3219 	}
3220 
3221 	pid = attr->link_create.uprobe_multi.pid;
3222 	if (pid) {
3223 		rcu_read_lock();
3224 		task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3225 		rcu_read_unlock();
3226 		if (!task) {
3227 			err = -ESRCH;
3228 			goto error_path_put;
3229 		}
3230 	}
3231 
3232 	err = -ENOMEM;
3233 
3234 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3235 	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3236 
3237 	if (!uprobes || !link)
3238 		goto error_free;
3239 
3240 	if (uref_ctr_offsets) {
3241 		ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
3242 		if (!ref_ctr_offsets)
3243 			goto error_free;
3244 	}
3245 
3246 	for (i = 0; i < cnt; i++) {
3247 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3248 			err = -EFAULT;
3249 			goto error_free;
3250 		}
3251 		if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
3252 			err = -EFAULT;
3253 			goto error_free;
3254 		}
3255 		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3256 			err = -EFAULT;
3257 			goto error_free;
3258 		}
3259 
3260 		uprobes[i].link = link;
3261 
3262 		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3263 			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3264 		else
3265 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3266 
3267 		if (pid)
3268 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3269 	}
3270 
3271 	link->cnt = cnt;
3272 	link->uprobes = uprobes;
3273 	link->path = path;
3274 	link->task = task;
3275 
3276 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3277 		      &bpf_uprobe_multi_link_lops, prog);
3278 
3279 	for (i = 0; i < cnt; i++) {
3280 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3281 					     uprobes[i].offset,
3282 					     ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
3283 					     &uprobes[i].consumer);
3284 		if (err) {
3285 			bpf_uprobe_unregister(&path, uprobes, i);
3286 			goto error_free;
3287 		}
3288 	}
3289 
3290 	err = bpf_link_prime(&link->link, &link_primer);
3291 	if (err)
3292 		goto error_free;
3293 
3294 	kvfree(ref_ctr_offsets);
3295 	return bpf_link_settle(&link_primer);
3296 
3297 error_free:
3298 	kvfree(ref_ctr_offsets);
3299 	kvfree(uprobes);
3300 	kfree(link);
3301 	if (task)
3302 		put_task_struct(task);
3303 error_path_put:
3304 	path_put(&path);
3305 	return err;
3306 }
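
/*
 * Userspace sketch (assumption): uprobe_multi links are created the same way,
 * with path, offsets and cnt mandatory and ref_ctr_offsets, cookies and pid
 * optional, mirroring the checks above.  The binary path and offsets are
 * hypothetical:
 *
 *	unsigned long offs[] = { 0x1234, 0x5678 };	// hypothetical offsets
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd = prog_fd;
 *	attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
 *	attr.link_create.uprobe_multi.path = (__u64)(uintptr_t)"/usr/bin/app";
 *	attr.link_create.uprobe_multi.offsets = (__u64)(uintptr_t)offs;
 *	attr.link_create.uprobe_multi.cnt = 2;
 *
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * BPF_F_UPROBE_MULTI_RETURN in .flags switches the consumers to the
 * ret_handler, and a non-zero .pid restricts the probes to that process via
 * uprobe_multi_link_filter().
 */
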
3307 #else /* !CONFIG_UPROBES */
3308 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3309 {
3310 	return -EOPNOTSUPP;
3311 }
3312 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3313 {
3314 	return 0;
3315 }
3316 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3317 {
3318 	return 0;
3319 }
3320 #endif /* CONFIG_UPROBES */
3321