xref: /openbmc/linux/kernel/trace/bpf_trace.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
3  * Copyright (c) 2016 Facebook
4  */
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/bpf.h>
9 #include <linux/bpf_verifier.h>
10 #include <linux/bpf_perf_event.h>
11 #include <linux/btf.h>
12 #include <linux/filter.h>
13 #include <linux/uaccess.h>
14 #include <linux/ctype.h>
15 #include <linux/kprobes.h>
16 #include <linux/spinlock.h>
17 #include <linux/syscalls.h>
18 #include <linux/error-injection.h>
19 #include <linux/btf_ids.h>
20 #include <linux/bpf_lsm.h>
21 #include <linux/fprobe.h>
22 #include <linux/bsearch.h>
23 #include <linux/sort.h>
24 #include <linux/key.h>
25 #include <linux/verification.h>
26 #include <linux/namei.h>
27 
28 #include <net/bpf_sk_storage.h>
29 
30 #include <uapi/linux/bpf.h>
31 #include <uapi/linux/btf.h>
32 
33 #include <asm/tlb.h>
34 
35 #include "trace_probe.h"
36 #include "trace.h"
37 
38 #define CREATE_TRACE_POINTS
39 #include "bpf_trace.h"
40 
41 #define bpf_event_rcu_dereference(p)					\
42 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
43 
44 #define MAX_UPROBE_MULTI_CNT (1U << 20)
45 #define MAX_KPROBE_MULTI_CNT (1U << 20)
46 
47 #ifdef CONFIG_MODULES
48 struct bpf_trace_module {
49 	struct module *module;
50 	struct list_head list;
51 };
52 
53 static LIST_HEAD(bpf_trace_modules);
54 static DEFINE_MUTEX(bpf_module_mutex);
55 
bpf_get_raw_tracepoint_module(const char * name)56 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
57 {
58 	struct bpf_raw_event_map *btp, *ret = NULL;
59 	struct bpf_trace_module *btm;
60 	unsigned int i;
61 
62 	mutex_lock(&bpf_module_mutex);
63 	list_for_each_entry(btm, &bpf_trace_modules, list) {
64 		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
65 			btp = &btm->module->bpf_raw_events[i];
66 			if (!strcmp(btp->tp->name, name)) {
67 				if (try_module_get(btm->module))
68 					ret = btp;
69 				goto out;
70 			}
71 		}
72 	}
73 out:
74 	mutex_unlock(&bpf_module_mutex);
75 	return ret;
76 }
77 #else
bpf_get_raw_tracepoint_module(const char * name)78 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
79 {
80 	return NULL;
81 }
82 #endif /* CONFIG_MODULES */
83 
84 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
85 u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
86 
87 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
88 				  u64 flags, const struct btf **btf,
89 				  s32 *btf_id);
90 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
91 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
92 
93 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
94 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
95 
96 /**
97  * trace_call_bpf - invoke BPF program
98  * @call: tracepoint event
99  * @ctx: opaque context pointer
100  *
101  * kprobe handlers execute BPF programs via this helper.
102  * Can be used from static tracepoints in the future.
103  *
104  * Return: BPF programs always return an integer which is interpreted by
105  * kprobe handler as:
106  * 0 - return from kprobe (event is filtered out)
107  * 1 - store kprobe event into ring buffer
108  * Other values are reserved and currently alias to 1
109  */
trace_call_bpf(struct trace_event_call * call,void * ctx)110 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
111 {
112 	unsigned int ret;
113 
114 	cant_sleep();
115 
116 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
117 		/*
118 		 * since some bpf program is already running on this cpu,
119 		 * don't call into another bpf program (same or different)
120 		 * and don't send kprobe event into ring-buffer,
121 		 * so return zero here
122 		 */
123 		ret = 0;
124 		goto out;
125 	}
126 
127 	/*
128 	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
129 	 * to all call sites, we did a bpf_prog_array_valid() there to check
130 	 * whether call->prog_array is empty or not, which is
131 	 * a heuristic to speed up execution.
132 	 *
133 	 * If bpf_prog_array_valid() fetched prog_array was
134 	 * non-NULL, we go into trace_call_bpf() and do the actual
135 	 * proper rcu_dereference() under RCU lock.
136 	 * If it turns out that prog_array is NULL then, we bail out.
137 	 * For the opposite, if the bpf_prog_array_valid() fetched pointer
138 	 * was NULL, you'll skip the prog_array with the risk of missing
139 	 * out of events when it was updated in between this and the
140 	 * rcu_dereference() which is accepted risk.
141 	 */
142 	rcu_read_lock();
143 	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
144 				 ctx, bpf_prog_run);
145 	rcu_read_unlock();
146 
147  out:
148 	__this_cpu_dec(bpf_prog_active);
149 
150 	return ret;
151 }
152 
153 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return,struct pt_regs *,regs,unsigned long,rc)154 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
155 {
156 	regs_set_return_value(regs, rc);
157 	override_function_with_return(regs);
158 	return 0;
159 }
160 
161 static const struct bpf_func_proto bpf_override_return_proto = {
162 	.func		= bpf_override_return,
163 	.gpl_only	= true,
164 	.ret_type	= RET_INTEGER,
165 	.arg1_type	= ARG_PTR_TO_CTX,
166 	.arg2_type	= ARG_ANYTHING,
167 };
168 #endif
169 
170 static __always_inline int
bpf_probe_read_user_common(void * dst,u32 size,const void __user * unsafe_ptr)171 bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
172 {
173 	int ret;
174 
175 	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
176 	if (unlikely(ret < 0))
177 		memset(dst, 0, size);
178 	return ret;
179 }
180 
BPF_CALL_3(bpf_probe_read_user,void *,dst,u32,size,const void __user *,unsafe_ptr)181 BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
182 	   const void __user *, unsafe_ptr)
183 {
184 	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
185 }
186 
187 const struct bpf_func_proto bpf_probe_read_user_proto = {
188 	.func		= bpf_probe_read_user,
189 	.gpl_only	= true,
190 	.ret_type	= RET_INTEGER,
191 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
192 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
193 	.arg3_type	= ARG_ANYTHING,
194 };
195 
196 static __always_inline int
bpf_probe_read_user_str_common(void * dst,u32 size,const void __user * unsafe_ptr)197 bpf_probe_read_user_str_common(void *dst, u32 size,
198 			       const void __user *unsafe_ptr)
199 {
200 	int ret;
201 
202 	/*
203 	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
204 	 * terminator into `dst`.
205 	 *
206 	 * strncpy_from_user() does long-sized strides in the fast path. If the
207 	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
208 	 * then there could be junk after the NUL in `dst`. If user takes `dst`
209 	 * and keys a hash map with it, then semantically identical strings can
210 	 * occupy multiple entries in the map.
211 	 */
212 	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
213 	if (unlikely(ret < 0))
214 		memset(dst, 0, size);
215 	return ret;
216 }
217 
BPF_CALL_3(bpf_probe_read_user_str,void *,dst,u32,size,const void __user *,unsafe_ptr)218 BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
219 	   const void __user *, unsafe_ptr)
220 {
221 	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
222 }
223 
224 const struct bpf_func_proto bpf_probe_read_user_str_proto = {
225 	.func		= bpf_probe_read_user_str,
226 	.gpl_only	= true,
227 	.ret_type	= RET_INTEGER,
228 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
229 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
230 	.arg3_type	= ARG_ANYTHING,
231 };
232 
BPF_CALL_3(bpf_probe_read_kernel,void *,dst,u32,size,const void *,unsafe_ptr)233 BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
234 	   const void *, unsafe_ptr)
235 {
236 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
237 }
238 
239 const struct bpf_func_proto bpf_probe_read_kernel_proto = {
240 	.func		= bpf_probe_read_kernel,
241 	.gpl_only	= true,
242 	.ret_type	= RET_INTEGER,
243 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
244 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
245 	.arg3_type	= ARG_ANYTHING,
246 };
247 
248 static __always_inline int
bpf_probe_read_kernel_str_common(void * dst,u32 size,const void * unsafe_ptr)249 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
250 {
251 	int ret;
252 
253 	/*
254 	 * The strncpy_from_kernel_nofault() call will likely not fill the
255 	 * entire buffer, but that's okay in this circumstance as we're probing
256 	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
257 	 * as well probe the stack. Thus, memory is explicitly cleared
258 	 * only in error case, so that improper users ignoring return
259 	 * code altogether don't copy garbage; otherwise length of string
260 	 * is returned that can be used for bpf_perf_event_output() et al.
261 	 */
262 	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
263 	if (unlikely(ret < 0))
264 		memset(dst, 0, size);
265 	return ret;
266 }
267 
BPF_CALL_3(bpf_probe_read_kernel_str,void *,dst,u32,size,const void *,unsafe_ptr)268 BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
269 	   const void *, unsafe_ptr)
270 {
271 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
272 }
273 
274 const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
275 	.func		= bpf_probe_read_kernel_str,
276 	.gpl_only	= true,
277 	.ret_type	= RET_INTEGER,
278 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
279 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
280 	.arg3_type	= ARG_ANYTHING,
281 };
282 
283 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat,void *,dst,u32,size,const void *,unsafe_ptr)284 BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
285 	   const void *, unsafe_ptr)
286 {
287 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
288 		return bpf_probe_read_user_common(dst, size,
289 				(__force void __user *)unsafe_ptr);
290 	}
291 	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
292 }
293 
294 static const struct bpf_func_proto bpf_probe_read_compat_proto = {
295 	.func		= bpf_probe_read_compat,
296 	.gpl_only	= true,
297 	.ret_type	= RET_INTEGER,
298 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
299 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
300 	.arg3_type	= ARG_ANYTHING,
301 };
302 
BPF_CALL_3(bpf_probe_read_compat_str,void *,dst,u32,size,const void *,unsafe_ptr)303 BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
304 	   const void *, unsafe_ptr)
305 {
306 	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
307 		return bpf_probe_read_user_str_common(dst, size,
308 				(__force void __user *)unsafe_ptr);
309 	}
310 	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
311 }
312 
313 static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
314 	.func		= bpf_probe_read_compat_str,
315 	.gpl_only	= true,
316 	.ret_type	= RET_INTEGER,
317 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
318 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
319 	.arg3_type	= ARG_ANYTHING,
320 };
321 #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
322 
BPF_CALL_3(bpf_probe_write_user,void __user *,unsafe_ptr,const void *,src,u32,size)323 BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
324 	   u32, size)
325 {
326 	/*
327 	 * Ensure we're in user context which is safe for the helper to
328 	 * run. This helper has no business in a kthread.
329 	 *
330 	 * access_ok() should prevent writing to non-user memory, but in
331 	 * some situations (nommu, temporary switch, etc) access_ok() does
332 	 * not provide enough validation, hence the check on KERNEL_DS.
333 	 *
334 	 * nmi_uaccess_okay() ensures the probe is not run in an interim
335 	 * state, when the task or mm are switched. This is specifically
336 	 * required to prevent the use of temporary mm.
337 	 */
338 
339 	if (unlikely(in_interrupt() ||
340 		     current->flags & (PF_KTHREAD | PF_EXITING)))
341 		return -EPERM;
342 	if (unlikely(!nmi_uaccess_okay()))
343 		return -EPERM;
344 
345 	return copy_to_user_nofault(unsafe_ptr, src, size);
346 }
347 
348 static const struct bpf_func_proto bpf_probe_write_user_proto = {
349 	.func		= bpf_probe_write_user,
350 	.gpl_only	= true,
351 	.ret_type	= RET_INTEGER,
352 	.arg1_type	= ARG_ANYTHING,
353 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
354 	.arg3_type	= ARG_CONST_SIZE,
355 };
356 
bpf_get_probe_write_proto(void)357 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
358 {
359 	if (!capable(CAP_SYS_ADMIN))
360 		return NULL;
361 
362 	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
363 			    current->comm, task_pid_nr(current));
364 
365 	return &bpf_probe_write_user_proto;
366 }
367 
368 #define MAX_TRACE_PRINTK_VARARGS	3
369 #define BPF_TRACE_PRINTK_SIZE		1024
370 
BPF_CALL_5(bpf_trace_printk,char *,fmt,u32,fmt_size,u64,arg1,u64,arg2,u64,arg3)371 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
372 	   u64, arg2, u64, arg3)
373 {
374 	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
375 	struct bpf_bprintf_data data = {
376 		.get_bin_args	= true,
377 		.get_buf	= true,
378 	};
379 	int ret;
380 
381 	ret = bpf_bprintf_prepare(fmt, fmt_size, args,
382 				  MAX_TRACE_PRINTK_VARARGS, &data);
383 	if (ret < 0)
384 		return ret;
385 
386 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
387 
388 	trace_bpf_trace_printk(data.buf);
389 
390 	bpf_bprintf_cleanup(&data);
391 
392 	return ret;
393 }
394 
395 static const struct bpf_func_proto bpf_trace_printk_proto = {
396 	.func		= bpf_trace_printk,
397 	.gpl_only	= true,
398 	.ret_type	= RET_INTEGER,
399 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
400 	.arg2_type	= ARG_CONST_SIZE,
401 };
402 
__set_printk_clr_event(void)403 static void __set_printk_clr_event(void)
404 {
405 	/*
406 	 * This program might be calling bpf_trace_printk,
407 	 * so enable the associated bpf_trace/bpf_trace_printk event.
408 	 * Repeat this each time as it is possible a user has
409 	 * disabled bpf_trace_printk events.  By loading a program
410 	 * calling bpf_trace_printk() however the user has expressed
411 	 * the intent to see such events.
412 	 */
413 	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
414 		pr_warn_ratelimited("could not enable bpf_trace_printk events");
415 }
416 
bpf_get_trace_printk_proto(void)417 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
418 {
419 	__set_printk_clr_event();
420 	return &bpf_trace_printk_proto;
421 }
422 
BPF_CALL_4(bpf_trace_vprintk,char *,fmt,u32,fmt_size,const void *,args,u32,data_len)423 BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args,
424 	   u32, data_len)
425 {
426 	struct bpf_bprintf_data data = {
427 		.get_bin_args	= true,
428 		.get_buf	= true,
429 	};
430 	int ret, num_args;
431 
432 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
433 	    (data_len && !args))
434 		return -EINVAL;
435 	num_args = data_len / 8;
436 
437 	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
438 	if (ret < 0)
439 		return ret;
440 
441 	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
442 
443 	trace_bpf_trace_printk(data.buf);
444 
445 	bpf_bprintf_cleanup(&data);
446 
447 	return ret;
448 }
449 
450 static const struct bpf_func_proto bpf_trace_vprintk_proto = {
451 	.func		= bpf_trace_vprintk,
452 	.gpl_only	= true,
453 	.ret_type	= RET_INTEGER,
454 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
455 	.arg2_type	= ARG_CONST_SIZE,
456 	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
457 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
458 };
459 
bpf_get_trace_vprintk_proto(void)460 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
461 {
462 	__set_printk_clr_event();
463 	return &bpf_trace_vprintk_proto;
464 }
465 
BPF_CALL_5(bpf_seq_printf,struct seq_file *,m,char *,fmt,u32,fmt_size,const void *,args,u32,data_len)466 BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
467 	   const void *, args, u32, data_len)
468 {
469 	struct bpf_bprintf_data data = {
470 		.get_bin_args	= true,
471 	};
472 	int err, num_args;
473 
474 	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
475 	    (data_len && !args))
476 		return -EINVAL;
477 	num_args = data_len / 8;
478 
479 	err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
480 	if (err < 0)
481 		return err;
482 
483 	seq_bprintf(m, fmt, data.bin_args);
484 
485 	bpf_bprintf_cleanup(&data);
486 
487 	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
488 }
489 
490 BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
491 
492 static const struct bpf_func_proto bpf_seq_printf_proto = {
493 	.func		= bpf_seq_printf,
494 	.gpl_only	= true,
495 	.ret_type	= RET_INTEGER,
496 	.arg1_type	= ARG_PTR_TO_BTF_ID,
497 	.arg1_btf_id	= &btf_seq_file_ids[0],
498 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
499 	.arg3_type	= ARG_CONST_SIZE,
500 	.arg4_type      = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
501 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
502 };
503 
BPF_CALL_3(bpf_seq_write,struct seq_file *,m,const void *,data,u32,len)504 BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
505 {
506 	return seq_write(m, data, len) ? -EOVERFLOW : 0;
507 }
508 
509 static const struct bpf_func_proto bpf_seq_write_proto = {
510 	.func		= bpf_seq_write,
511 	.gpl_only	= true,
512 	.ret_type	= RET_INTEGER,
513 	.arg1_type	= ARG_PTR_TO_BTF_ID,
514 	.arg1_btf_id	= &btf_seq_file_ids[0],
515 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
516 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
517 };
518 
BPF_CALL_4(bpf_seq_printf_btf,struct seq_file *,m,struct btf_ptr *,ptr,u32,btf_ptr_size,u64,flags)519 BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
520 	   u32, btf_ptr_size, u64, flags)
521 {
522 	const struct btf *btf;
523 	s32 btf_id;
524 	int ret;
525 
526 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
527 	if (ret)
528 		return ret;
529 
530 	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
531 }
532 
533 static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
534 	.func		= bpf_seq_printf_btf,
535 	.gpl_only	= true,
536 	.ret_type	= RET_INTEGER,
537 	.arg1_type	= ARG_PTR_TO_BTF_ID,
538 	.arg1_btf_id	= &btf_seq_file_ids[0],
539 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
540 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
541 	.arg4_type	= ARG_ANYTHING,
542 };
543 
544 static __always_inline int
get_map_perf_counter(struct bpf_map * map,u64 flags,u64 * value,u64 * enabled,u64 * running)545 get_map_perf_counter(struct bpf_map *map, u64 flags,
546 		     u64 *value, u64 *enabled, u64 *running)
547 {
548 	struct bpf_array *array = container_of(map, struct bpf_array, map);
549 	unsigned int cpu = smp_processor_id();
550 	u64 index = flags & BPF_F_INDEX_MASK;
551 	struct bpf_event_entry *ee;
552 
553 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
554 		return -EINVAL;
555 	if (index == BPF_F_CURRENT_CPU)
556 		index = cpu;
557 	if (unlikely(index >= array->map.max_entries))
558 		return -E2BIG;
559 
560 	ee = READ_ONCE(array->ptrs[index]);
561 	if (!ee)
562 		return -ENOENT;
563 
564 	return perf_event_read_local(ee->event, value, enabled, running);
565 }
566 
BPF_CALL_2(bpf_perf_event_read,struct bpf_map *,map,u64,flags)567 BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
568 {
569 	u64 value = 0;
570 	int err;
571 
572 	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
573 	/*
574 	 * this api is ugly since we miss [-22..-2] range of valid
575 	 * counter values, but that's uapi
576 	 */
577 	if (err)
578 		return err;
579 	return value;
580 }
581 
582 static const struct bpf_func_proto bpf_perf_event_read_proto = {
583 	.func		= bpf_perf_event_read,
584 	.gpl_only	= true,
585 	.ret_type	= RET_INTEGER,
586 	.arg1_type	= ARG_CONST_MAP_PTR,
587 	.arg2_type	= ARG_ANYTHING,
588 };
589 
BPF_CALL_4(bpf_perf_event_read_value,struct bpf_map *,map,u64,flags,struct bpf_perf_event_value *,buf,u32,size)590 BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
591 	   struct bpf_perf_event_value *, buf, u32, size)
592 {
593 	int err = -EINVAL;
594 
595 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
596 		goto clear;
597 	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
598 				   &buf->running);
599 	if (unlikely(err))
600 		goto clear;
601 	return 0;
602 clear:
603 	memset(buf, 0, size);
604 	return err;
605 }
606 
607 static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
608 	.func		= bpf_perf_event_read_value,
609 	.gpl_only	= true,
610 	.ret_type	= RET_INTEGER,
611 	.arg1_type	= ARG_CONST_MAP_PTR,
612 	.arg2_type	= ARG_ANYTHING,
613 	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
614 	.arg4_type	= ARG_CONST_SIZE,
615 };
616 
617 static __always_inline u64
__bpf_perf_event_output(struct pt_regs * regs,struct bpf_map * map,u64 flags,struct perf_raw_record * raw,struct perf_sample_data * sd)618 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
619 			u64 flags, struct perf_raw_record *raw,
620 			struct perf_sample_data *sd)
621 {
622 	struct bpf_array *array = container_of(map, struct bpf_array, map);
623 	unsigned int cpu = smp_processor_id();
624 	u64 index = flags & BPF_F_INDEX_MASK;
625 	struct bpf_event_entry *ee;
626 	struct perf_event *event;
627 
628 	if (index == BPF_F_CURRENT_CPU)
629 		index = cpu;
630 	if (unlikely(index >= array->map.max_entries))
631 		return -E2BIG;
632 
633 	ee = READ_ONCE(array->ptrs[index]);
634 	if (!ee)
635 		return -ENOENT;
636 
637 	event = ee->event;
638 	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
639 		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
640 		return -EINVAL;
641 
642 	if (unlikely(event->oncpu != cpu))
643 		return -EOPNOTSUPP;
644 
645 	perf_sample_save_raw_data(sd, event, raw);
646 
647 	return perf_event_output(event, sd, regs);
648 }
649 
650 /*
651  * Support executing tracepoints in normal, irq, and nmi context that each call
652  * bpf_perf_event_output
653  */
654 struct bpf_trace_sample_data {
655 	struct perf_sample_data sds[3];
656 };
657 
658 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
659 static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output,struct pt_regs *,regs,struct bpf_map *,map,u64,flags,void *,data,u64,size)660 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
661 	   u64, flags, void *, data, u64, size)
662 {
663 	struct bpf_trace_sample_data *sds;
664 	struct perf_raw_record raw = {
665 		.frag = {
666 			.size = size,
667 			.data = data,
668 		},
669 	};
670 	struct perf_sample_data *sd;
671 	int nest_level, err;
672 
673 	preempt_disable();
674 	sds = this_cpu_ptr(&bpf_trace_sds);
675 	nest_level = this_cpu_inc_return(bpf_trace_nest_level);
676 
677 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
678 		err = -EBUSY;
679 		goto out;
680 	}
681 
682 	sd = &sds->sds[nest_level - 1];
683 
684 	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
685 		err = -EINVAL;
686 		goto out;
687 	}
688 
689 	perf_sample_data_init(sd, 0, 0);
690 
691 	err = __bpf_perf_event_output(regs, map, flags, &raw, sd);
692 out:
693 	this_cpu_dec(bpf_trace_nest_level);
694 	preempt_enable();
695 	return err;
696 }
697 
698 static const struct bpf_func_proto bpf_perf_event_output_proto = {
699 	.func		= bpf_perf_event_output,
700 	.gpl_only	= true,
701 	.ret_type	= RET_INTEGER,
702 	.arg1_type	= ARG_PTR_TO_CTX,
703 	.arg2_type	= ARG_CONST_MAP_PTR,
704 	.arg3_type	= ARG_ANYTHING,
705 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
706 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
707 };
708 
709 static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
710 struct bpf_nested_pt_regs {
711 	struct pt_regs regs[3];
712 };
713 static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
714 static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
715 
bpf_event_output(struct bpf_map * map,u64 flags,void * meta,u64 meta_size,void * ctx,u64 ctx_size,bpf_ctx_copy_t ctx_copy)716 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
717 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
718 {
719 	struct perf_raw_frag frag = {
720 		.copy		= ctx_copy,
721 		.size		= ctx_size,
722 		.data		= ctx,
723 	};
724 	struct perf_raw_record raw = {
725 		.frag = {
726 			{
727 				.next	= ctx_size ? &frag : NULL,
728 			},
729 			.size	= meta_size,
730 			.data	= meta,
731 		},
732 	};
733 	struct perf_sample_data *sd;
734 	struct pt_regs *regs;
735 	int nest_level;
736 	u64 ret;
737 
738 	preempt_disable();
739 	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
740 
741 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
742 		ret = -EBUSY;
743 		goto out;
744 	}
745 	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
746 	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
747 
748 	perf_fetch_caller_regs(regs);
749 	perf_sample_data_init(sd, 0, 0);
750 
751 	ret = __bpf_perf_event_output(regs, map, flags, &raw, sd);
752 out:
753 	this_cpu_dec(bpf_event_output_nest_level);
754 	preempt_enable();
755 	return ret;
756 }
757 
BPF_CALL_0(bpf_get_current_task)758 BPF_CALL_0(bpf_get_current_task)
759 {
760 	return (long) current;
761 }
762 
763 const struct bpf_func_proto bpf_get_current_task_proto = {
764 	.func		= bpf_get_current_task,
765 	.gpl_only	= true,
766 	.ret_type	= RET_INTEGER,
767 };
768 
BPF_CALL_0(bpf_get_current_task_btf)769 BPF_CALL_0(bpf_get_current_task_btf)
770 {
771 	return (unsigned long) current;
772 }
773 
774 const struct bpf_func_proto bpf_get_current_task_btf_proto = {
775 	.func		= bpf_get_current_task_btf,
776 	.gpl_only	= true,
777 	.ret_type	= RET_PTR_TO_BTF_ID_TRUSTED,
778 	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
779 };
780 
BPF_CALL_1(bpf_task_pt_regs,struct task_struct *,task)781 BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
782 {
783 	return (unsigned long) task_pt_regs(task);
784 }
785 
786 BTF_ID_LIST(bpf_task_pt_regs_ids)
787 BTF_ID(struct, pt_regs)
788 
789 const struct bpf_func_proto bpf_task_pt_regs_proto = {
790 	.func		= bpf_task_pt_regs,
791 	.gpl_only	= true,
792 	.arg1_type	= ARG_PTR_TO_BTF_ID,
793 	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
794 	.ret_type	= RET_PTR_TO_BTF_ID,
795 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
796 };
797 
BPF_CALL_2(bpf_current_task_under_cgroup,struct bpf_map *,map,u32,idx)798 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
799 {
800 	struct bpf_array *array = container_of(map, struct bpf_array, map);
801 	struct cgroup *cgrp;
802 
803 	if (unlikely(idx >= array->map.max_entries))
804 		return -E2BIG;
805 
806 	cgrp = READ_ONCE(array->ptrs[idx]);
807 	if (unlikely(!cgrp))
808 		return -EAGAIN;
809 
810 	return task_under_cgroup_hierarchy(current, cgrp);
811 }
812 
813 static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
814 	.func           = bpf_current_task_under_cgroup,
815 	.gpl_only       = false,
816 	.ret_type       = RET_INTEGER,
817 	.arg1_type      = ARG_CONST_MAP_PTR,
818 	.arg2_type      = ARG_ANYTHING,
819 };
820 
821 struct send_signal_irq_work {
822 	struct irq_work irq_work;
823 	struct task_struct *task;
824 	u32 sig;
825 	enum pid_type type;
826 };
827 
828 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
829 
do_bpf_send_signal(struct irq_work * entry)830 static void do_bpf_send_signal(struct irq_work *entry)
831 {
832 	struct send_signal_irq_work *work;
833 
834 	work = container_of(entry, struct send_signal_irq_work, irq_work);
835 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
836 	put_task_struct(work->task);
837 }
838 
bpf_send_signal_common(u32 sig,enum pid_type type)839 static int bpf_send_signal_common(u32 sig, enum pid_type type)
840 {
841 	struct send_signal_irq_work *work = NULL;
842 
843 	/* Similar to bpf_probe_write_user, task needs to be
844 	 * in a sound condition and kernel memory access be
845 	 * permitted in order to send signal to the current
846 	 * task.
847 	 */
848 	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
849 		return -EPERM;
850 	if (unlikely(!nmi_uaccess_okay()))
851 		return -EPERM;
852 	/* Task should not be pid=1 to avoid kernel panic. */
853 	if (unlikely(is_global_init(current)))
854 		return -EPERM;
855 
856 	if (!preemptible()) {
857 		/* Do an early check on signal validity. Otherwise,
858 		 * the error is lost in deferred irq_work.
859 		 */
860 		if (unlikely(!valid_signal(sig)))
861 			return -EINVAL;
862 
863 		work = this_cpu_ptr(&send_signal_work);
864 		if (irq_work_is_busy(&work->irq_work))
865 			return -EBUSY;
866 
867 		/* Add the current task, which is the target of sending signal,
868 		 * to the irq_work. The current task may change when queued
869 		 * irq works get executed.
870 		 */
871 		work->task = get_task_struct(current);
872 		work->sig = sig;
873 		work->type = type;
874 		irq_work_queue(&work->irq_work);
875 		return 0;
876 	}
877 
878 	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
879 }
880 
BPF_CALL_1(bpf_send_signal,u32,sig)881 BPF_CALL_1(bpf_send_signal, u32, sig)
882 {
883 	return bpf_send_signal_common(sig, PIDTYPE_TGID);
884 }
885 
886 static const struct bpf_func_proto bpf_send_signal_proto = {
887 	.func		= bpf_send_signal,
888 	.gpl_only	= false,
889 	.ret_type	= RET_INTEGER,
890 	.arg1_type	= ARG_ANYTHING,
891 };
892 
BPF_CALL_1(bpf_send_signal_thread,u32,sig)893 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
894 {
895 	return bpf_send_signal_common(sig, PIDTYPE_PID);
896 }
897 
898 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
899 	.func		= bpf_send_signal_thread,
900 	.gpl_only	= false,
901 	.ret_type	= RET_INTEGER,
902 	.arg1_type	= ARG_ANYTHING,
903 };
904 
BPF_CALL_3(bpf_d_path,struct path *,path,char *,buf,u32,sz)905 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
906 {
907 	struct path copy;
908 	long len;
909 	char *p;
910 
911 	if (!sz)
912 		return 0;
913 
914 	/*
915 	 * The path pointer is verified as trusted and safe to use,
916 	 * but let's double check it's valid anyway to workaround
917 	 * potentially broken verifier.
918 	 */
919 	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
920 	if (len < 0)
921 		return len;
922 
923 	p = d_path(&copy, buf, sz);
924 	if (IS_ERR(p)) {
925 		len = PTR_ERR(p);
926 	} else {
927 		len = buf + sz - p;
928 		memmove(buf, p, len);
929 	}
930 
931 	return len;
932 }
933 
934 BTF_SET_START(btf_allowlist_d_path)
935 #ifdef CONFIG_SECURITY
BTF_ID(func,security_file_permission)936 BTF_ID(func, security_file_permission)
937 BTF_ID(func, security_inode_getattr)
938 BTF_ID(func, security_file_open)
939 #endif
940 #ifdef CONFIG_SECURITY_PATH
941 BTF_ID(func, security_path_truncate)
942 #endif
943 BTF_ID(func, vfs_truncate)
944 BTF_ID(func, vfs_fallocate)
945 BTF_ID(func, dentry_open)
946 BTF_ID(func, vfs_getattr)
947 BTF_ID(func, filp_close)
948 BTF_SET_END(btf_allowlist_d_path)
949 
950 static bool bpf_d_path_allowed(const struct bpf_prog *prog)
951 {
952 	if (prog->type == BPF_PROG_TYPE_TRACING &&
953 	    prog->expected_attach_type == BPF_TRACE_ITER)
954 		return true;
955 
956 	if (prog->type == BPF_PROG_TYPE_LSM)
957 		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
958 
959 	return btf_id_set_contains(&btf_allowlist_d_path,
960 				   prog->aux->attach_btf_id);
961 }
962 
963 BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
964 
965 static const struct bpf_func_proto bpf_d_path_proto = {
966 	.func		= bpf_d_path,
967 	.gpl_only	= false,
968 	.ret_type	= RET_INTEGER,
969 	.arg1_type	= ARG_PTR_TO_BTF_ID,
970 	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
971 	.arg2_type	= ARG_PTR_TO_MEM,
972 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
973 	.allowed	= bpf_d_path_allowed,
974 };
975 
976 #define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
977 			 BTF_F_PTR_RAW | BTF_F_ZERO)
978 
bpf_btf_printf_prepare(struct btf_ptr * ptr,u32 btf_ptr_size,u64 flags,const struct btf ** btf,s32 * btf_id)979 static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
980 				  u64 flags, const struct btf **btf,
981 				  s32 *btf_id)
982 {
983 	const struct btf_type *t;
984 
985 	if (unlikely(flags & ~(BTF_F_ALL)))
986 		return -EINVAL;
987 
988 	if (btf_ptr_size != sizeof(struct btf_ptr))
989 		return -EINVAL;
990 
991 	*btf = bpf_get_btf_vmlinux();
992 
993 	if (IS_ERR_OR_NULL(*btf))
994 		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;
995 
996 	if (ptr->type_id > 0)
997 		*btf_id = ptr->type_id;
998 	else
999 		return -EINVAL;
1000 
1001 	if (*btf_id > 0)
1002 		t = btf_type_by_id(*btf, *btf_id);
1003 	if (*btf_id <= 0 || !t)
1004 		return -ENOENT;
1005 
1006 	return 0;
1007 }
1008 
BPF_CALL_5(bpf_snprintf_btf,char *,str,u32,str_size,struct btf_ptr *,ptr,u32,btf_ptr_size,u64,flags)1009 BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
1010 	   u32, btf_ptr_size, u64, flags)
1011 {
1012 	const struct btf *btf;
1013 	s32 btf_id;
1014 	int ret;
1015 
1016 	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
1017 	if (ret)
1018 		return ret;
1019 
1020 	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
1021 				      flags);
1022 }
1023 
1024 const struct bpf_func_proto bpf_snprintf_btf_proto = {
1025 	.func		= bpf_snprintf_btf,
1026 	.gpl_only	= false,
1027 	.ret_type	= RET_INTEGER,
1028 	.arg1_type	= ARG_PTR_TO_MEM,
1029 	.arg2_type	= ARG_CONST_SIZE,
1030 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1031 	.arg4_type	= ARG_CONST_SIZE,
1032 	.arg5_type	= ARG_ANYTHING,
1033 };
1034 
BPF_CALL_1(bpf_get_func_ip_tracing,void *,ctx)1035 BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
1036 {
1037 	/* This helper call is inlined by verifier. */
1038 	return ((u64 *)ctx)[-2];
1039 }
1040 
1041 static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
1042 	.func		= bpf_get_func_ip_tracing,
1043 	.gpl_only	= true,
1044 	.ret_type	= RET_INTEGER,
1045 	.arg1_type	= ARG_PTR_TO_CTX,
1046 };
1047 
1048 #ifdef CONFIG_X86_KERNEL_IBT
get_entry_ip(unsigned long fentry_ip)1049 static unsigned long get_entry_ip(unsigned long fentry_ip)
1050 {
1051 	u32 instr;
1052 
1053 	/* Being extra safe in here in case entry ip is on the page-edge. */
1054 	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
1055 		return fentry_ip;
1056 	if (is_endbr(instr))
1057 		fentry_ip -= ENDBR_INSN_SIZE;
1058 	return fentry_ip;
1059 }
1060 #else
1061 #define get_entry_ip(fentry_ip) fentry_ip
1062 #endif
1063 
BPF_CALL_1(bpf_get_func_ip_kprobe,struct pt_regs *,regs)1064 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
1065 {
1066 	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
1067 	struct kprobe *kp;
1068 
1069 #ifdef CONFIG_UPROBES
1070 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1071 	if (run_ctx->is_uprobe)
1072 		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
1073 #endif
1074 
1075 	kp = kprobe_running();
1076 
1077 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
1078 		return 0;
1079 
1080 	return get_entry_ip((uintptr_t)kp->addr);
1081 }
1082 
1083 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
1084 	.func		= bpf_get_func_ip_kprobe,
1085 	.gpl_only	= true,
1086 	.ret_type	= RET_INTEGER,
1087 	.arg1_type	= ARG_PTR_TO_CTX,
1088 };
1089 
BPF_CALL_1(bpf_get_func_ip_kprobe_multi,struct pt_regs *,regs)1090 BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
1091 {
1092 	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
1093 }
1094 
1095 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
1096 	.func		= bpf_get_func_ip_kprobe_multi,
1097 	.gpl_only	= false,
1098 	.ret_type	= RET_INTEGER,
1099 	.arg1_type	= ARG_PTR_TO_CTX,
1100 };
1101 
BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi,struct pt_regs *,regs)1102 BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
1103 {
1104 	return bpf_kprobe_multi_cookie(current->bpf_ctx);
1105 }
1106 
1107 static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
1108 	.func		= bpf_get_attach_cookie_kprobe_multi,
1109 	.gpl_only	= false,
1110 	.ret_type	= RET_INTEGER,
1111 	.arg1_type	= ARG_PTR_TO_CTX,
1112 };
1113 
BPF_CALL_1(bpf_get_func_ip_uprobe_multi,struct pt_regs *,regs)1114 BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
1115 {
1116 	return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
1117 }
1118 
1119 static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
1120 	.func		= bpf_get_func_ip_uprobe_multi,
1121 	.gpl_only	= false,
1122 	.ret_type	= RET_INTEGER,
1123 	.arg1_type	= ARG_PTR_TO_CTX,
1124 };
1125 
BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi,struct pt_regs *,regs)1126 BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
1127 {
1128 	return bpf_uprobe_multi_cookie(current->bpf_ctx);
1129 }
1130 
1131 static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
1132 	.func		= bpf_get_attach_cookie_uprobe_multi,
1133 	.gpl_only	= false,
1134 	.ret_type	= RET_INTEGER,
1135 	.arg1_type	= ARG_PTR_TO_CTX,
1136 };
1137 
BPF_CALL_1(bpf_get_attach_cookie_trace,void *,ctx)1138 BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
1139 {
1140 	struct bpf_trace_run_ctx *run_ctx;
1141 
1142 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1143 	return run_ctx->bpf_cookie;
1144 }
1145 
1146 static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
1147 	.func		= bpf_get_attach_cookie_trace,
1148 	.gpl_only	= false,
1149 	.ret_type	= RET_INTEGER,
1150 	.arg1_type	= ARG_PTR_TO_CTX,
1151 };
1152 
BPF_CALL_1(bpf_get_attach_cookie_pe,struct bpf_perf_event_data_kern *,ctx)1153 BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
1154 {
1155 	return ctx->event->bpf_cookie;
1156 }
1157 
1158 static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
1159 	.func		= bpf_get_attach_cookie_pe,
1160 	.gpl_only	= false,
1161 	.ret_type	= RET_INTEGER,
1162 	.arg1_type	= ARG_PTR_TO_CTX,
1163 };
1164 
BPF_CALL_1(bpf_get_attach_cookie_tracing,void *,ctx)1165 BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
1166 {
1167 	struct bpf_trace_run_ctx *run_ctx;
1168 
1169 	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
1170 	return run_ctx->bpf_cookie;
1171 }
1172 
1173 static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
1174 	.func		= bpf_get_attach_cookie_tracing,
1175 	.gpl_only	= false,
1176 	.ret_type	= RET_INTEGER,
1177 	.arg1_type	= ARG_PTR_TO_CTX,
1178 };
1179 
BPF_CALL_3(bpf_get_branch_snapshot,void *,buf,u32,size,u64,flags)1180 BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
1181 {
1182 #ifndef CONFIG_X86
1183 	return -ENOENT;
1184 #else
1185 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1186 	u32 entry_cnt = size / br_entry_size;
1187 
1188 	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
1189 
1190 	if (unlikely(flags))
1191 		return -EINVAL;
1192 
1193 	if (!entry_cnt)
1194 		return -ENOENT;
1195 
1196 	return entry_cnt * br_entry_size;
1197 #endif
1198 }
1199 
1200 static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
1201 	.func		= bpf_get_branch_snapshot,
1202 	.gpl_only	= true,
1203 	.ret_type	= RET_INTEGER,
1204 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1205 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1206 };
1207 
BPF_CALL_3(get_func_arg,void *,ctx,u32,n,u64 *,value)1208 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
1209 {
1210 	/* This helper call is inlined by verifier. */
1211 	u64 nr_args = ((u64 *)ctx)[-1];
1212 
1213 	if ((u64) n >= nr_args)
1214 		return -EINVAL;
1215 	*value = ((u64 *)ctx)[n];
1216 	return 0;
1217 }
1218 
1219 static const struct bpf_func_proto bpf_get_func_arg_proto = {
1220 	.func		= get_func_arg,
1221 	.ret_type	= RET_INTEGER,
1222 	.arg1_type	= ARG_PTR_TO_CTX,
1223 	.arg2_type	= ARG_ANYTHING,
1224 	.arg3_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1225 	.arg3_size	= sizeof(u64),
1226 };
1227 
BPF_CALL_2(get_func_ret,void *,ctx,u64 *,value)1228 BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
1229 {
1230 	/* This helper call is inlined by verifier. */
1231 	u64 nr_args = ((u64 *)ctx)[-1];
1232 
1233 	*value = ((u64 *)ctx)[nr_args];
1234 	return 0;
1235 }
1236 
1237 static const struct bpf_func_proto bpf_get_func_ret_proto = {
1238 	.func		= get_func_ret,
1239 	.ret_type	= RET_INTEGER,
1240 	.arg1_type	= ARG_PTR_TO_CTX,
1241 	.arg2_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
1242 	.arg2_size	= sizeof(u64),
1243 };
1244 
BPF_CALL_1(get_func_arg_cnt,void *,ctx)1245 BPF_CALL_1(get_func_arg_cnt, void *, ctx)
1246 {
1247 	/* This helper call is inlined by verifier. */
1248 	return ((u64 *)ctx)[-1];
1249 }
1250 
1251 static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
1252 	.func		= get_func_arg_cnt,
1253 	.ret_type	= RET_INTEGER,
1254 	.arg1_type	= ARG_PTR_TO_CTX,
1255 };
1256 
1257 #ifdef CONFIG_KEYS
1258 __diag_push();
1259 __diag_ignore_all("-Wmissing-prototypes",
1260 		  "kfuncs which will be used in BPF programs");
1261 
1262 /**
1263  * bpf_lookup_user_key - lookup a key by its serial
1264  * @serial: key handle serial number
1265  * @flags: lookup-specific flags
1266  *
1267  * Search a key with a given *serial* and the provided *flags*.
1268  * If found, increment the reference count of the key by one, and
1269  * return it in the bpf_key structure.
1270  *
1271  * The bpf_key structure must be passed to bpf_key_put() when done
1272  * with it, so that the key reference count is decremented and the
1273  * bpf_key structure is freed.
1274  *
1275  * Permission checks are deferred to the time the key is used by
1276  * one of the available key-specific kfuncs.
1277  *
1278  * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested
1279  * special keyring (e.g. session keyring), if it doesn't yet exist.
1280  * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting
1281  * for the key construction, and to retrieve uninstantiated keys (keys
1282  * without data attached to them).
1283  *
1284  * Return: a bpf_key pointer with a valid key pointer if the key is found, a
1285  *         NULL pointer otherwise.
1286  */
bpf_lookup_user_key(u32 serial,u64 flags)1287 __bpf_kfunc struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
1288 {
1289 	key_ref_t key_ref;
1290 	struct bpf_key *bkey;
1291 
1292 	if (flags & ~KEY_LOOKUP_ALL)
1293 		return NULL;
1294 
1295 	/*
1296 	 * Permission check is deferred until the key is used, as the
1297 	 * intent of the caller is unknown here.
1298 	 */
1299 	key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK);
1300 	if (IS_ERR(key_ref))
1301 		return NULL;
1302 
1303 	bkey = kmalloc(sizeof(*bkey), GFP_KERNEL);
1304 	if (!bkey) {
1305 		key_put(key_ref_to_ptr(key_ref));
1306 		return NULL;
1307 	}
1308 
1309 	bkey->key = key_ref_to_ptr(key_ref);
1310 	bkey->has_ref = true;
1311 
1312 	return bkey;
1313 }
1314 
1315 /**
1316  * bpf_lookup_system_key - lookup a key by a system-defined ID
1317  * @id: key ID
1318  *
1319  * Obtain a bpf_key structure with a key pointer set to the passed key ID.
1320  * The key pointer is marked as invalid, to prevent bpf_key_put() from
1321  * attempting to decrement the key reference count on that pointer. The key
1322  * pointer set in such way is currently understood only by
1323  * verify_pkcs7_signature().
1324  *
1325  * Set *id* to one of the values defined in include/linux/verification.h:
1326  * 0 for the primary keyring (immutable keyring of system keys);
1327  * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring
1328  * (where keys can be added only if they are vouched for by existing keys
1329  * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform
1330  * keyring (primarily used by the integrity subsystem to verify a kexec'ed
1331  * kerned image and, possibly, the initramfs signature).
1332  *
1333  * Return: a bpf_key pointer with an invalid key pointer set from the
1334  *         pre-determined ID on success, a NULL pointer otherwise
1335  */
bpf_lookup_system_key(u64 id)1336 __bpf_kfunc struct bpf_key *bpf_lookup_system_key(u64 id)
1337 {
1338 	struct bpf_key *bkey;
1339 
1340 	if (system_keyring_id_check(id) < 0)
1341 		return NULL;
1342 
1343 	bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC);
1344 	if (!bkey)
1345 		return NULL;
1346 
1347 	bkey->key = (struct key *)(unsigned long)id;
1348 	bkey->has_ref = false;
1349 
1350 	return bkey;
1351 }
1352 
1353 /**
1354  * bpf_key_put - decrement key reference count if key is valid and free bpf_key
1355  * @bkey: bpf_key structure
1356  *
1357  * Decrement the reference count of the key inside *bkey*, if the pointer
1358  * is valid, and free *bkey*.
1359  */
bpf_key_put(struct bpf_key * bkey)1360 __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
1361 {
1362 	if (bkey->has_ref)
1363 		key_put(bkey->key);
1364 
1365 	kfree(bkey);
1366 }
1367 
1368 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1369 /**
1370  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
1371  * @data_ptr: data to verify
1372  * @sig_ptr: signature of the data
1373  * @trusted_keyring: keyring with keys trusted for signature verification
1374  *
1375  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
1376  * with keys in a keyring referenced by *trusted_keyring*.
1377  *
1378  * Return: 0 on success, a negative value on error.
1379  */
bpf_verify_pkcs7_signature(struct bpf_dynptr_kern * data_ptr,struct bpf_dynptr_kern * sig_ptr,struct bpf_key * trusted_keyring)1380 __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
1381 			       struct bpf_dynptr_kern *sig_ptr,
1382 			       struct bpf_key *trusted_keyring)
1383 {
1384 	int ret;
1385 
1386 	if (trusted_keyring->has_ref) {
1387 		/*
1388 		 * Do the permission check deferred in bpf_lookup_user_key().
1389 		 * See bpf_lookup_user_key() for more details.
1390 		 *
1391 		 * A call to key_task_permission() here would be redundant, as
1392 		 * it is already done by keyring_search() called by
1393 		 * find_asymmetric_key().
1394 		 */
1395 		ret = key_validate(trusted_keyring->key);
1396 		if (ret < 0)
1397 			return ret;
1398 	}
1399 
1400 	return verify_pkcs7_signature(data_ptr->data,
1401 				      __bpf_dynptr_size(data_ptr),
1402 				      sig_ptr->data,
1403 				      __bpf_dynptr_size(sig_ptr),
1404 				      trusted_keyring->key,
1405 				      VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
1406 				      NULL);
1407 }
1408 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
1409 
1410 __diag_pop();
1411 
1412 BTF_SET8_START(key_sig_kfunc_set)
1413 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
1414 BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL)
1415 BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE)
1416 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
1417 BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE)
1418 #endif
1419 BTF_SET8_END(key_sig_kfunc_set)
1420 
1421 static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
1422 	.owner = THIS_MODULE,
1423 	.set = &key_sig_kfunc_set,
1424 };
1425 
bpf_key_sig_kfuncs_init(void)1426 static int __init bpf_key_sig_kfuncs_init(void)
1427 {
1428 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
1429 					 &bpf_key_sig_kfunc_set);
1430 }
1431 
1432 late_initcall(bpf_key_sig_kfuncs_init);
1433 #endif /* CONFIG_KEYS */
1434 
1435 static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1436 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1437 {
1438 	switch (func_id) {
1439 	case BPF_FUNC_map_lookup_elem:
1440 		return &bpf_map_lookup_elem_proto;
1441 	case BPF_FUNC_map_update_elem:
1442 		return &bpf_map_update_elem_proto;
1443 	case BPF_FUNC_map_delete_elem:
1444 		return &bpf_map_delete_elem_proto;
1445 	case BPF_FUNC_map_push_elem:
1446 		return &bpf_map_push_elem_proto;
1447 	case BPF_FUNC_map_pop_elem:
1448 		return &bpf_map_pop_elem_proto;
1449 	case BPF_FUNC_map_peek_elem:
1450 		return &bpf_map_peek_elem_proto;
1451 	case BPF_FUNC_map_lookup_percpu_elem:
1452 		return &bpf_map_lookup_percpu_elem_proto;
1453 	case BPF_FUNC_ktime_get_ns:
1454 		return &bpf_ktime_get_ns_proto;
1455 	case BPF_FUNC_ktime_get_boot_ns:
1456 		return &bpf_ktime_get_boot_ns_proto;
1457 	case BPF_FUNC_tail_call:
1458 		return &bpf_tail_call_proto;
1459 	case BPF_FUNC_get_current_pid_tgid:
1460 		return &bpf_get_current_pid_tgid_proto;
1461 	case BPF_FUNC_get_current_task:
1462 		return &bpf_get_current_task_proto;
1463 	case BPF_FUNC_get_current_task_btf:
1464 		return &bpf_get_current_task_btf_proto;
1465 	case BPF_FUNC_task_pt_regs:
1466 		return &bpf_task_pt_regs_proto;
1467 	case BPF_FUNC_get_current_uid_gid:
1468 		return &bpf_get_current_uid_gid_proto;
1469 	case BPF_FUNC_get_current_comm:
1470 		return &bpf_get_current_comm_proto;
1471 	case BPF_FUNC_trace_printk:
1472 		return bpf_get_trace_printk_proto();
1473 	case BPF_FUNC_get_smp_processor_id:
1474 		return &bpf_get_smp_processor_id_proto;
1475 	case BPF_FUNC_get_numa_node_id:
1476 		return &bpf_get_numa_node_id_proto;
1477 	case BPF_FUNC_perf_event_read:
1478 		return &bpf_perf_event_read_proto;
1479 	case BPF_FUNC_current_task_under_cgroup:
1480 		return &bpf_current_task_under_cgroup_proto;
1481 	case BPF_FUNC_get_prandom_u32:
1482 		return &bpf_get_prandom_u32_proto;
1483 	case BPF_FUNC_probe_write_user:
1484 		return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ?
1485 		       NULL : bpf_get_probe_write_proto();
1486 	case BPF_FUNC_probe_read_user:
1487 		return &bpf_probe_read_user_proto;
1488 	case BPF_FUNC_probe_read_kernel:
1489 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1490 		       NULL : &bpf_probe_read_kernel_proto;
1491 	case BPF_FUNC_probe_read_user_str:
1492 		return &bpf_probe_read_user_str_proto;
1493 	case BPF_FUNC_probe_read_kernel_str:
1494 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1495 		       NULL : &bpf_probe_read_kernel_str_proto;
1496 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1497 	case BPF_FUNC_probe_read:
1498 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1499 		       NULL : &bpf_probe_read_compat_proto;
1500 	case BPF_FUNC_probe_read_str:
1501 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
1502 		       NULL : &bpf_probe_read_compat_str_proto;
1503 #endif
1504 #ifdef CONFIG_CGROUPS
1505 	case BPF_FUNC_cgrp_storage_get:
1506 		return &bpf_cgrp_storage_get_proto;
1507 	case BPF_FUNC_cgrp_storage_delete:
1508 		return &bpf_cgrp_storage_delete_proto;
1509 #endif
1510 	case BPF_FUNC_send_signal:
1511 		return &bpf_send_signal_proto;
1512 	case BPF_FUNC_send_signal_thread:
1513 		return &bpf_send_signal_thread_proto;
1514 	case BPF_FUNC_perf_event_read_value:
1515 		return &bpf_perf_event_read_value_proto;
1516 	case BPF_FUNC_get_ns_current_pid_tgid:
1517 		return &bpf_get_ns_current_pid_tgid_proto;
1518 	case BPF_FUNC_ringbuf_output:
1519 		return &bpf_ringbuf_output_proto;
1520 	case BPF_FUNC_ringbuf_reserve:
1521 		return &bpf_ringbuf_reserve_proto;
1522 	case BPF_FUNC_ringbuf_submit:
1523 		return &bpf_ringbuf_submit_proto;
1524 	case BPF_FUNC_ringbuf_discard:
1525 		return &bpf_ringbuf_discard_proto;
1526 	case BPF_FUNC_ringbuf_query:
1527 		return &bpf_ringbuf_query_proto;
1528 	case BPF_FUNC_jiffies64:
1529 		return &bpf_jiffies64_proto;
1530 	case BPF_FUNC_get_task_stack:
1531 		return &bpf_get_task_stack_proto;
1532 	case BPF_FUNC_copy_from_user:
1533 		return &bpf_copy_from_user_proto;
1534 	case BPF_FUNC_copy_from_user_task:
1535 		return &bpf_copy_from_user_task_proto;
1536 	case BPF_FUNC_snprintf_btf:
1537 		return &bpf_snprintf_btf_proto;
1538 	case BPF_FUNC_per_cpu_ptr:
1539 		return &bpf_per_cpu_ptr_proto;
1540 	case BPF_FUNC_this_cpu_ptr:
1541 		return &bpf_this_cpu_ptr_proto;
1542 	case BPF_FUNC_task_storage_get:
1543 		if (bpf_prog_check_recur(prog))
1544 			return &bpf_task_storage_get_recur_proto;
1545 		return &bpf_task_storage_get_proto;
1546 	case BPF_FUNC_task_storage_delete:
1547 		if (bpf_prog_check_recur(prog))
1548 			return &bpf_task_storage_delete_recur_proto;
1549 		return &bpf_task_storage_delete_proto;
1550 	case BPF_FUNC_for_each_map_elem:
1551 		return &bpf_for_each_map_elem_proto;
1552 	case BPF_FUNC_snprintf:
1553 		return &bpf_snprintf_proto;
1554 	case BPF_FUNC_get_func_ip:
1555 		return &bpf_get_func_ip_proto_tracing;
1556 	case BPF_FUNC_get_branch_snapshot:
1557 		return &bpf_get_branch_snapshot_proto;
1558 	case BPF_FUNC_find_vma:
1559 		return &bpf_find_vma_proto;
1560 	case BPF_FUNC_trace_vprintk:
1561 		return bpf_get_trace_vprintk_proto();
1562 	default:
1563 		return bpf_base_func_proto(func_id);
1564 	}
1565 }
1566 
1567 static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1568 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1569 {
1570 	switch (func_id) {
1571 	case BPF_FUNC_perf_event_output:
1572 		return &bpf_perf_event_output_proto;
1573 	case BPF_FUNC_get_stackid:
1574 		return &bpf_get_stackid_proto;
1575 	case BPF_FUNC_get_stack:
1576 		return &bpf_get_stack_proto;
1577 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
1578 	case BPF_FUNC_override_return:
1579 		return &bpf_override_return_proto;
1580 #endif
1581 	case BPF_FUNC_get_func_ip:
1582 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1583 			return &bpf_get_func_ip_proto_kprobe_multi;
1584 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1585 			return &bpf_get_func_ip_proto_uprobe_multi;
1586 		return &bpf_get_func_ip_proto_kprobe;
1587 	case BPF_FUNC_get_attach_cookie:
1588 		if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
1589 			return &bpf_get_attach_cookie_proto_kmulti;
1590 		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
1591 			return &bpf_get_attach_cookie_proto_umulti;
1592 		return &bpf_get_attach_cookie_proto_trace;
1593 	default:
1594 		return bpf_tracing_func_proto(func_id, prog);
1595 	}
1596 }
1597 
1598 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
kprobe_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)1599 static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1600 					const struct bpf_prog *prog,
1601 					struct bpf_insn_access_aux *info)
1602 {
1603 	if (off < 0 || off >= sizeof(struct pt_regs))
1604 		return false;
1605 	if (type != BPF_READ)
1606 		return false;
1607 	if (off % size != 0)
1608 		return false;
1609 	/*
1610 	 * Assertion for 32 bit to make sure last 8 byte access
1611 	 * (BPF_DW) to the last 4 byte member is disallowed.
1612 	 */
1613 	if (off + size > sizeof(struct pt_regs))
1614 		return false;
1615 
1616 	return true;
1617 }
1618 
1619 const struct bpf_verifier_ops kprobe_verifier_ops = {
1620 	.get_func_proto  = kprobe_prog_func_proto,
1621 	.is_valid_access = kprobe_prog_is_valid_access,
1622 };
1623 
1624 const struct bpf_prog_ops kprobe_prog_ops = {
1625 };
1626 
BPF_CALL_5(bpf_perf_event_output_tp,void *,tp_buff,struct bpf_map *,map,u64,flags,void *,data,u64,size)1627 BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
1628 	   u64, flags, void *, data, u64, size)
1629 {
1630 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1631 
1632 	/*
1633 	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
1634 	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
1635 	 * from there and call the same bpf_perf_event_output() helper inline.
1636 	 */
1637 	return ____bpf_perf_event_output(regs, map, flags, data, size);
1638 }
1639 
1640 static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
1641 	.func		= bpf_perf_event_output_tp,
1642 	.gpl_only	= true,
1643 	.ret_type	= RET_INTEGER,
1644 	.arg1_type	= ARG_PTR_TO_CTX,
1645 	.arg2_type	= ARG_CONST_MAP_PTR,
1646 	.arg3_type	= ARG_ANYTHING,
1647 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1648 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1649 };
1650 
BPF_CALL_3(bpf_get_stackid_tp,void *,tp_buff,struct bpf_map *,map,u64,flags)1651 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
1652 	   u64, flags)
1653 {
1654 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1655 
1656 	/*
1657 	 * Same comment as in bpf_perf_event_output_tp(), only that this time
1658 	 * the other helper's function body cannot be inlined due to being
1659 	 * external, thus we need to call raw helper function.
1660 	 */
1661 	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1662 			       flags, 0, 0);
1663 }
1664 
1665 static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
1666 	.func		= bpf_get_stackid_tp,
1667 	.gpl_only	= true,
1668 	.ret_type	= RET_INTEGER,
1669 	.arg1_type	= ARG_PTR_TO_CTX,
1670 	.arg2_type	= ARG_CONST_MAP_PTR,
1671 	.arg3_type	= ARG_ANYTHING,
1672 };
1673 
BPF_CALL_4(bpf_get_stack_tp,void *,tp_buff,void *,buf,u32,size,u64,flags)1674 BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
1675 	   u64, flags)
1676 {
1677 	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
1678 
1679 	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1680 			     (unsigned long) size, flags, 0);
1681 }
1682 
1683 static const struct bpf_func_proto bpf_get_stack_proto_tp = {
1684 	.func		= bpf_get_stack_tp,
1685 	.gpl_only	= true,
1686 	.ret_type	= RET_INTEGER,
1687 	.arg1_type	= ARG_PTR_TO_CTX,
1688 	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
1689 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1690 	.arg4_type	= ARG_ANYTHING,
1691 };
1692 
1693 static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1694 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1695 {
1696 	switch (func_id) {
1697 	case BPF_FUNC_perf_event_output:
1698 		return &bpf_perf_event_output_proto_tp;
1699 	case BPF_FUNC_get_stackid:
1700 		return &bpf_get_stackid_proto_tp;
1701 	case BPF_FUNC_get_stack:
1702 		return &bpf_get_stack_proto_tp;
1703 	case BPF_FUNC_get_attach_cookie:
1704 		return &bpf_get_attach_cookie_proto_trace;
1705 	default:
1706 		return bpf_tracing_func_proto(func_id, prog);
1707 	}
1708 }
1709 
tp_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)1710 static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
1711 				    const struct bpf_prog *prog,
1712 				    struct bpf_insn_access_aux *info)
1713 {
1714 	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
1715 		return false;
1716 	if (type != BPF_READ)
1717 		return false;
1718 	if (off % size != 0)
1719 		return false;
1720 
1721 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
1722 	return true;
1723 }
1724 
1725 const struct bpf_verifier_ops tracepoint_verifier_ops = {
1726 	.get_func_proto  = tp_prog_func_proto,
1727 	.is_valid_access = tp_prog_is_valid_access,
1728 };
1729 
1730 const struct bpf_prog_ops tracepoint_prog_ops = {
1731 };
1732 
BPF_CALL_3(bpf_perf_prog_read_value,struct bpf_perf_event_data_kern *,ctx,struct bpf_perf_event_value *,buf,u32,size)1733 BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
1734 	   struct bpf_perf_event_value *, buf, u32, size)
1735 {
1736 	int err = -EINVAL;
1737 
1738 	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
1739 		goto clear;
1740 	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
1741 				    &buf->running);
1742 	if (unlikely(err))
1743 		goto clear;
1744 	return 0;
1745 clear:
1746 	memset(buf, 0, size);
1747 	return err;
1748 }
1749 
1750 static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
1751          .func           = bpf_perf_prog_read_value,
1752          .gpl_only       = true,
1753          .ret_type       = RET_INTEGER,
1754          .arg1_type      = ARG_PTR_TO_CTX,
1755          .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1756          .arg3_type      = ARG_CONST_SIZE,
1757 };
1758 
BPF_CALL_4(bpf_read_branch_records,struct bpf_perf_event_data_kern *,ctx,void *,buf,u32,size,u64,flags)1759 BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx,
1760 	   void *, buf, u32, size, u64, flags)
1761 {
1762 	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
1763 	struct perf_branch_stack *br_stack = ctx->data->br_stack;
1764 	u32 to_copy;
1765 
1766 	if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE))
1767 		return -EINVAL;
1768 
1769 	if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK)))
1770 		return -ENOENT;
1771 
1772 	if (unlikely(!br_stack))
1773 		return -ENOENT;
1774 
1775 	if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE)
1776 		return br_stack->nr * br_entry_size;
1777 
1778 	if (!buf || (size % br_entry_size != 0))
1779 		return -EINVAL;
1780 
1781 	to_copy = min_t(u32, br_stack->nr * br_entry_size, size);
1782 	memcpy(buf, br_stack->entries, to_copy);
1783 
1784 	return to_copy;
1785 }
1786 
1787 static const struct bpf_func_proto bpf_read_branch_records_proto = {
1788 	.func           = bpf_read_branch_records,
1789 	.gpl_only       = true,
1790 	.ret_type       = RET_INTEGER,
1791 	.arg1_type      = ARG_PTR_TO_CTX,
1792 	.arg2_type      = ARG_PTR_TO_MEM_OR_NULL,
1793 	.arg3_type      = ARG_CONST_SIZE_OR_ZERO,
1794 	.arg4_type      = ARG_ANYTHING,
1795 };
1796 
1797 static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1798 pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1799 {
1800 	switch (func_id) {
1801 	case BPF_FUNC_perf_event_output:
1802 		return &bpf_perf_event_output_proto_tp;
1803 	case BPF_FUNC_get_stackid:
1804 		return &bpf_get_stackid_proto_pe;
1805 	case BPF_FUNC_get_stack:
1806 		return &bpf_get_stack_proto_pe;
1807 	case BPF_FUNC_perf_prog_read_value:
1808 		return &bpf_perf_prog_read_value_proto;
1809 	case BPF_FUNC_read_branch_records:
1810 		return &bpf_read_branch_records_proto;
1811 	case BPF_FUNC_get_attach_cookie:
1812 		return &bpf_get_attach_cookie_proto_pe;
1813 	default:
1814 		return bpf_tracing_func_proto(func_id, prog);
1815 	}
1816 }
1817 
1818 /*
1819  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
1820  * to avoid potential recursive reuse issue when/if tracepoints are added
1821  * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
1822  *
1823  * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
1824  * in normal, irq, and nmi context.
1825  */
1826 struct bpf_raw_tp_regs {
1827 	struct pt_regs regs[3];
1828 };
1829 static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
1830 static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
get_bpf_raw_tp_regs(void)1831 static struct pt_regs *get_bpf_raw_tp_regs(void)
1832 {
1833 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
1834 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
1835 
1836 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
1837 		this_cpu_dec(bpf_raw_tp_nest_level);
1838 		return ERR_PTR(-EBUSY);
1839 	}
1840 
1841 	return &tp_regs->regs[nest_level - 1];
1842 }
1843 
put_bpf_raw_tp_regs(void)1844 static void put_bpf_raw_tp_regs(void)
1845 {
1846 	this_cpu_dec(bpf_raw_tp_nest_level);
1847 }
1848 
BPF_CALL_5(bpf_perf_event_output_raw_tp,struct bpf_raw_tracepoint_args *,args,struct bpf_map *,map,u64,flags,void *,data,u64,size)1849 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
1850 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
1851 {
1852 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1853 	int ret;
1854 
1855 	if (IS_ERR(regs))
1856 		return PTR_ERR(regs);
1857 
1858 	perf_fetch_caller_regs(regs);
1859 	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
1860 
1861 	put_bpf_raw_tp_regs();
1862 	return ret;
1863 }
1864 
1865 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
1866 	.func		= bpf_perf_event_output_raw_tp,
1867 	.gpl_only	= true,
1868 	.ret_type	= RET_INTEGER,
1869 	.arg1_type	= ARG_PTR_TO_CTX,
1870 	.arg2_type	= ARG_CONST_MAP_PTR,
1871 	.arg3_type	= ARG_ANYTHING,
1872 	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1873 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1874 };
1875 
1876 extern const struct bpf_func_proto bpf_skb_output_proto;
1877 extern const struct bpf_func_proto bpf_xdp_output_proto;
1878 extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto;
1879 
BPF_CALL_3(bpf_get_stackid_raw_tp,struct bpf_raw_tracepoint_args *,args,struct bpf_map *,map,u64,flags)1880 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
1881 	   struct bpf_map *, map, u64, flags)
1882 {
1883 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1884 	int ret;
1885 
1886 	if (IS_ERR(regs))
1887 		return PTR_ERR(regs);
1888 
1889 	perf_fetch_caller_regs(regs);
1890 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
1891 	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
1892 			      flags, 0, 0);
1893 	put_bpf_raw_tp_regs();
1894 	return ret;
1895 }
1896 
1897 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
1898 	.func		= bpf_get_stackid_raw_tp,
1899 	.gpl_only	= true,
1900 	.ret_type	= RET_INTEGER,
1901 	.arg1_type	= ARG_PTR_TO_CTX,
1902 	.arg2_type	= ARG_CONST_MAP_PTR,
1903 	.arg3_type	= ARG_ANYTHING,
1904 };
1905 
BPF_CALL_4(bpf_get_stack_raw_tp,struct bpf_raw_tracepoint_args *,args,void *,buf,u32,size,u64,flags)1906 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
1907 	   void *, buf, u32, size, u64, flags)
1908 {
1909 	struct pt_regs *regs = get_bpf_raw_tp_regs();
1910 	int ret;
1911 
1912 	if (IS_ERR(regs))
1913 		return PTR_ERR(regs);
1914 
1915 	perf_fetch_caller_regs(regs);
1916 	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
1917 			    (unsigned long) size, flags, 0);
1918 	put_bpf_raw_tp_regs();
1919 	return ret;
1920 }
1921 
1922 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
1923 	.func		= bpf_get_stack_raw_tp,
1924 	.gpl_only	= true,
1925 	.ret_type	= RET_INTEGER,
1926 	.arg1_type	= ARG_PTR_TO_CTX,
1927 	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1928 	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
1929 	.arg4_type	= ARG_ANYTHING,
1930 };
1931 
1932 static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1933 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1934 {
1935 	switch (func_id) {
1936 	case BPF_FUNC_perf_event_output:
1937 		return &bpf_perf_event_output_proto_raw_tp;
1938 	case BPF_FUNC_get_stackid:
1939 		return &bpf_get_stackid_proto_raw_tp;
1940 	case BPF_FUNC_get_stack:
1941 		return &bpf_get_stack_proto_raw_tp;
1942 	default:
1943 		return bpf_tracing_func_proto(func_id, prog);
1944 	}
1945 }
1946 
1947 const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id,const struct bpf_prog * prog)1948 tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1949 {
1950 	const struct bpf_func_proto *fn;
1951 
1952 	switch (func_id) {
1953 #ifdef CONFIG_NET
1954 	case BPF_FUNC_skb_output:
1955 		return &bpf_skb_output_proto;
1956 	case BPF_FUNC_xdp_output:
1957 		return &bpf_xdp_output_proto;
1958 	case BPF_FUNC_skc_to_tcp6_sock:
1959 		return &bpf_skc_to_tcp6_sock_proto;
1960 	case BPF_FUNC_skc_to_tcp_sock:
1961 		return &bpf_skc_to_tcp_sock_proto;
1962 	case BPF_FUNC_skc_to_tcp_timewait_sock:
1963 		return &bpf_skc_to_tcp_timewait_sock_proto;
1964 	case BPF_FUNC_skc_to_tcp_request_sock:
1965 		return &bpf_skc_to_tcp_request_sock_proto;
1966 	case BPF_FUNC_skc_to_udp6_sock:
1967 		return &bpf_skc_to_udp6_sock_proto;
1968 	case BPF_FUNC_skc_to_unix_sock:
1969 		return &bpf_skc_to_unix_sock_proto;
1970 	case BPF_FUNC_skc_to_mptcp_sock:
1971 		return &bpf_skc_to_mptcp_sock_proto;
1972 	case BPF_FUNC_sk_storage_get:
1973 		return &bpf_sk_storage_get_tracing_proto;
1974 	case BPF_FUNC_sk_storage_delete:
1975 		return &bpf_sk_storage_delete_tracing_proto;
1976 	case BPF_FUNC_sock_from_file:
1977 		return &bpf_sock_from_file_proto;
1978 	case BPF_FUNC_get_socket_cookie:
1979 		return &bpf_get_socket_ptr_cookie_proto;
1980 	case BPF_FUNC_xdp_get_buff_len:
1981 		return &bpf_xdp_get_buff_len_trace_proto;
1982 #endif
1983 	case BPF_FUNC_seq_printf:
1984 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1985 		       &bpf_seq_printf_proto :
1986 		       NULL;
1987 	case BPF_FUNC_seq_write:
1988 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1989 		       &bpf_seq_write_proto :
1990 		       NULL;
1991 	case BPF_FUNC_seq_printf_btf:
1992 		return prog->expected_attach_type == BPF_TRACE_ITER ?
1993 		       &bpf_seq_printf_btf_proto :
1994 		       NULL;
1995 	case BPF_FUNC_d_path:
1996 		return &bpf_d_path_proto;
1997 	case BPF_FUNC_get_func_arg:
1998 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
1999 	case BPF_FUNC_get_func_ret:
2000 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
2001 	case BPF_FUNC_get_func_arg_cnt:
2002 		return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
2003 	case BPF_FUNC_get_attach_cookie:
2004 		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL;
2005 	default:
2006 		fn = raw_tp_prog_func_proto(func_id, prog);
2007 		if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
2008 			fn = bpf_iter_get_func_proto(func_id, prog);
2009 		return fn;
2010 	}
2011 }
2012 
raw_tp_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)2013 static bool raw_tp_prog_is_valid_access(int off, int size,
2014 					enum bpf_access_type type,
2015 					const struct bpf_prog *prog,
2016 					struct bpf_insn_access_aux *info)
2017 {
2018 	return bpf_tracing_ctx_access(off, size, type);
2019 }
2020 
tracing_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)2021 static bool tracing_prog_is_valid_access(int off, int size,
2022 					 enum bpf_access_type type,
2023 					 const struct bpf_prog *prog,
2024 					 struct bpf_insn_access_aux *info)
2025 {
2026 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
2027 }
2028 
bpf_prog_test_run_tracing(struct bpf_prog * prog,const union bpf_attr * kattr,union bpf_attr __user * uattr)2029 int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
2030 				     const union bpf_attr *kattr,
2031 				     union bpf_attr __user *uattr)
2032 {
2033 	return -ENOTSUPP;
2034 }
2035 
2036 const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
2037 	.get_func_proto  = raw_tp_prog_func_proto,
2038 	.is_valid_access = raw_tp_prog_is_valid_access,
2039 };
2040 
2041 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
2042 #ifdef CONFIG_NET
2043 	.test_run = bpf_prog_test_run_raw_tp,
2044 #endif
2045 };
2046 
2047 const struct bpf_verifier_ops tracing_verifier_ops = {
2048 	.get_func_proto  = tracing_prog_func_proto,
2049 	.is_valid_access = tracing_prog_is_valid_access,
2050 };
2051 
2052 const struct bpf_prog_ops tracing_prog_ops = {
2053 	.test_run = bpf_prog_test_run_tracing,
2054 };
2055 
raw_tp_writable_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)2056 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
2057 						 enum bpf_access_type type,
2058 						 const struct bpf_prog *prog,
2059 						 struct bpf_insn_access_aux *info)
2060 {
2061 	if (off == 0) {
2062 		if (size != sizeof(u64) || type != BPF_READ)
2063 			return false;
2064 		info->reg_type = PTR_TO_TP_BUFFER;
2065 	}
2066 	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
2067 }
2068 
2069 const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
2070 	.get_func_proto  = raw_tp_prog_func_proto,
2071 	.is_valid_access = raw_tp_writable_prog_is_valid_access,
2072 };
2073 
2074 const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
2075 };
2076 
pe_prog_is_valid_access(int off,int size,enum bpf_access_type type,const struct bpf_prog * prog,struct bpf_insn_access_aux * info)2077 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
2078 				    const struct bpf_prog *prog,
2079 				    struct bpf_insn_access_aux *info)
2080 {
2081 	const int size_u64 = sizeof(u64);
2082 
2083 	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
2084 		return false;
2085 	if (type != BPF_READ)
2086 		return false;
2087 	if (off % size != 0) {
2088 		if (sizeof(unsigned long) != 4)
2089 			return false;
2090 		if (size != 8)
2091 			return false;
2092 		if (off % size != 4)
2093 			return false;
2094 	}
2095 
2096 	switch (off) {
2097 	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
2098 		bpf_ctx_record_field_size(info, size_u64);
2099 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2100 			return false;
2101 		break;
2102 	case bpf_ctx_range(struct bpf_perf_event_data, addr):
2103 		bpf_ctx_record_field_size(info, size_u64);
2104 		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
2105 			return false;
2106 		break;
2107 	default:
2108 		if (size != sizeof(long))
2109 			return false;
2110 	}
2111 
2112 	return true;
2113 }
2114 
pe_prog_convert_ctx_access(enum bpf_access_type type,const struct bpf_insn * si,struct bpf_insn * insn_buf,struct bpf_prog * prog,u32 * target_size)2115 static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
2116 				      const struct bpf_insn *si,
2117 				      struct bpf_insn *insn_buf,
2118 				      struct bpf_prog *prog, u32 *target_size)
2119 {
2120 	struct bpf_insn *insn = insn_buf;
2121 
2122 	switch (si->off) {
2123 	case offsetof(struct bpf_perf_event_data, sample_period):
2124 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2125 						       data), si->dst_reg, si->src_reg,
2126 				      offsetof(struct bpf_perf_event_data_kern, data));
2127 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2128 				      bpf_target_off(struct perf_sample_data, period, 8,
2129 						     target_size));
2130 		break;
2131 	case offsetof(struct bpf_perf_event_data, addr):
2132 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2133 						       data), si->dst_reg, si->src_reg,
2134 				      offsetof(struct bpf_perf_event_data_kern, data));
2135 		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
2136 				      bpf_target_off(struct perf_sample_data, addr, 8,
2137 						     target_size));
2138 		break;
2139 	default:
2140 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
2141 						       regs), si->dst_reg, si->src_reg,
2142 				      offsetof(struct bpf_perf_event_data_kern, regs));
2143 		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
2144 				      si->off);
2145 		break;
2146 	}
2147 
2148 	return insn - insn_buf;
2149 }
2150 
2151 const struct bpf_verifier_ops perf_event_verifier_ops = {
2152 	.get_func_proto		= pe_prog_func_proto,
2153 	.is_valid_access	= pe_prog_is_valid_access,
2154 	.convert_ctx_access	= pe_prog_convert_ctx_access,
2155 };
2156 
2157 const struct bpf_prog_ops perf_event_prog_ops = {
2158 };
2159 
2160 static DEFINE_MUTEX(bpf_event_mutex);
2161 
2162 #define BPF_TRACE_MAX_PROGS 64
2163 
perf_event_attach_bpf_prog(struct perf_event * event,struct bpf_prog * prog,u64 bpf_cookie)2164 int perf_event_attach_bpf_prog(struct perf_event *event,
2165 			       struct bpf_prog *prog,
2166 			       u64 bpf_cookie)
2167 {
2168 	struct bpf_prog_array *old_array;
2169 	struct bpf_prog_array *new_array;
2170 	int ret = -EEXIST;
2171 
2172 	/*
2173 	 * Kprobe override only works if they are on the function entry,
2174 	 * and only if they are on the opt-in list.
2175 	 */
2176 	if (prog->kprobe_override &&
2177 	    (!trace_kprobe_on_func_entry(event->tp_event) ||
2178 	     !trace_kprobe_error_injectable(event->tp_event)))
2179 		return -EINVAL;
2180 
2181 	mutex_lock(&bpf_event_mutex);
2182 
2183 	if (event->prog)
2184 		goto unlock;
2185 
2186 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2187 	if (old_array &&
2188 	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
2189 		ret = -E2BIG;
2190 		goto unlock;
2191 	}
2192 
2193 	ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array);
2194 	if (ret < 0)
2195 		goto unlock;
2196 
2197 	/* set the new array to event->tp_event and set event->prog */
2198 	event->prog = prog;
2199 	event->bpf_cookie = bpf_cookie;
2200 	rcu_assign_pointer(event->tp_event->prog_array, new_array);
2201 	bpf_prog_array_free_sleepable(old_array);
2202 
2203 unlock:
2204 	mutex_unlock(&bpf_event_mutex);
2205 	return ret;
2206 }
2207 
perf_event_detach_bpf_prog(struct perf_event * event)2208 void perf_event_detach_bpf_prog(struct perf_event *event)
2209 {
2210 	struct bpf_prog_array *old_array;
2211 	struct bpf_prog_array *new_array;
2212 	int ret;
2213 
2214 	mutex_lock(&bpf_event_mutex);
2215 
2216 	if (!event->prog)
2217 		goto unlock;
2218 
2219 	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
2220 	if (!old_array)
2221 		goto put;
2222 
2223 	ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
2224 	if (ret < 0) {
2225 		bpf_prog_array_delete_safe(old_array, event->prog);
2226 	} else {
2227 		rcu_assign_pointer(event->tp_event->prog_array, new_array);
2228 		bpf_prog_array_free_sleepable(old_array);
2229 	}
2230 
2231 put:
2232 	/*
2233 	 * It could be that the bpf_prog is not sleepable (and will be freed
2234 	 * via normal RCU), but is called from a point that supports sleepable
2235 	 * programs and uses tasks-trace-RCU.
2236 	 */
2237 	synchronize_rcu_tasks_trace();
2238 
2239 	bpf_prog_put(event->prog);
2240 	event->prog = NULL;
2241 
2242 unlock:
2243 	mutex_unlock(&bpf_event_mutex);
2244 }
2245 
perf_event_query_prog_array(struct perf_event * event,void __user * info)2246 int perf_event_query_prog_array(struct perf_event *event, void __user *info)
2247 {
2248 	struct perf_event_query_bpf __user *uquery = info;
2249 	struct perf_event_query_bpf query = {};
2250 	struct bpf_prog_array *progs;
2251 	u32 *ids, prog_cnt, ids_len;
2252 	int ret;
2253 
2254 	if (!perfmon_capable())
2255 		return -EPERM;
2256 	if (event->attr.type != PERF_TYPE_TRACEPOINT)
2257 		return -EINVAL;
2258 	if (copy_from_user(&query, uquery, sizeof(query)))
2259 		return -EFAULT;
2260 
2261 	ids_len = query.ids_len;
2262 	if (ids_len > BPF_TRACE_MAX_PROGS)
2263 		return -E2BIG;
2264 	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
2265 	if (!ids)
2266 		return -ENOMEM;
2267 	/*
2268 	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
2269 	 * is required when user only wants to check for uquery->prog_cnt.
2270 	 * There is no need to check for it since the case is handled
2271 	 * gracefully in bpf_prog_array_copy_info.
2272 	 */
2273 
2274 	mutex_lock(&bpf_event_mutex);
2275 	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
2276 	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
2277 	mutex_unlock(&bpf_event_mutex);
2278 
2279 	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
2280 	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
2281 		ret = -EFAULT;
2282 
2283 	kfree(ids);
2284 	return ret;
2285 }
2286 
2287 extern struct bpf_raw_event_map __start__bpf_raw_tp[];
2288 extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
2289 
bpf_get_raw_tracepoint(const char * name)2290 struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
2291 {
2292 	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
2293 
2294 	for (; btp < __stop__bpf_raw_tp; btp++) {
2295 		if (!strcmp(btp->tp->name, name))
2296 			return btp;
2297 	}
2298 
2299 	return bpf_get_raw_tracepoint_module(name);
2300 }
2301 
bpf_put_raw_tracepoint(struct bpf_raw_event_map * btp)2302 void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
2303 {
2304 	struct module *mod;
2305 
2306 	preempt_disable();
2307 	mod = __module_address((unsigned long)btp);
2308 	module_put(mod);
2309 	preempt_enable();
2310 }
2311 
2312 static __always_inline
__bpf_trace_run(struct bpf_prog * prog,u64 * args)2313 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
2314 {
2315 	cant_sleep();
2316 	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
2317 		bpf_prog_inc_misses_counter(prog);
2318 		goto out;
2319 	}
2320 	rcu_read_lock();
2321 	(void) bpf_prog_run(prog, args);
2322 	rcu_read_unlock();
2323 out:
2324 	this_cpu_dec(*(prog->active));
2325 }
2326 
2327 #define UNPACK(...)			__VA_ARGS__
2328 #define REPEAT_1(FN, DL, X, ...)	FN(X)
2329 #define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
2330 #define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
2331 #define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
2332 #define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
2333 #define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
2334 #define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
2335 #define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
2336 #define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
2337 #define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
2338 #define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
2339 #define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
2340 #define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)
2341 
2342 #define SARG(X)		u64 arg##X
2343 #define COPY(X)		args[X] = arg##X
2344 
2345 #define __DL_COM	(,)
2346 #define __DL_SEM	(;)
2347 
2348 #define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
2349 
2350 #define BPF_TRACE_DEFN_x(x)						\
2351 	void bpf_trace_run##x(struct bpf_prog *prog,			\
2352 			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
2353 	{								\
2354 		u64 args[x];						\
2355 		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
2356 		__bpf_trace_run(prog, args);				\
2357 	}								\
2358 	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
2359 BPF_TRACE_DEFN_x(1);
2360 BPF_TRACE_DEFN_x(2);
2361 BPF_TRACE_DEFN_x(3);
2362 BPF_TRACE_DEFN_x(4);
2363 BPF_TRACE_DEFN_x(5);
2364 BPF_TRACE_DEFN_x(6);
2365 BPF_TRACE_DEFN_x(7);
2366 BPF_TRACE_DEFN_x(8);
2367 BPF_TRACE_DEFN_x(9);
2368 BPF_TRACE_DEFN_x(10);
2369 BPF_TRACE_DEFN_x(11);
2370 BPF_TRACE_DEFN_x(12);
2371 
__bpf_probe_register(struct bpf_raw_event_map * btp,struct bpf_prog * prog)2372 static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2373 {
2374 	struct tracepoint *tp = btp->tp;
2375 
2376 	/*
2377 	 * check that program doesn't access arguments beyond what's
2378 	 * available in this tracepoint
2379 	 */
2380 	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
2381 		return -EINVAL;
2382 
2383 	if (prog->aux->max_tp_access > btp->writable_size)
2384 		return -EINVAL;
2385 
2386 	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
2387 						   prog);
2388 }
2389 
bpf_probe_register(struct bpf_raw_event_map * btp,struct bpf_prog * prog)2390 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2391 {
2392 	return __bpf_probe_register(btp, prog);
2393 }
2394 
bpf_probe_unregister(struct bpf_raw_event_map * btp,struct bpf_prog * prog)2395 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
2396 {
2397 	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
2398 }
2399 
bpf_get_perf_event_info(const struct perf_event * event,u32 * prog_id,u32 * fd_type,const char ** buf,u64 * probe_offset,u64 * probe_addr,unsigned long * missed)2400 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
2401 			    u32 *fd_type, const char **buf,
2402 			    u64 *probe_offset, u64 *probe_addr,
2403 			    unsigned long *missed)
2404 {
2405 	bool is_tracepoint, is_syscall_tp;
2406 	struct bpf_prog *prog;
2407 	int flags, err = 0;
2408 
2409 	prog = event->prog;
2410 	if (!prog)
2411 		return -ENOENT;
2412 
2413 	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
2414 	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
2415 		return -EOPNOTSUPP;
2416 
2417 	*prog_id = prog->aux->id;
2418 	flags = event->tp_event->flags;
2419 	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
2420 	is_syscall_tp = is_syscall_trace_event(event->tp_event);
2421 
2422 	if (is_tracepoint || is_syscall_tp) {
2423 		*buf = is_tracepoint ? event->tp_event->tp->name
2424 				     : event->tp_event->name;
2425 		/* We allow NULL pointer for tracepoint */
2426 		if (fd_type)
2427 			*fd_type = BPF_FD_TYPE_TRACEPOINT;
2428 		if (probe_offset)
2429 			*probe_offset = 0x0;
2430 		if (probe_addr)
2431 			*probe_addr = 0x0;
2432 	} else {
2433 		/* kprobe/uprobe */
2434 		err = -EOPNOTSUPP;
2435 #ifdef CONFIG_KPROBE_EVENTS
2436 		if (flags & TRACE_EVENT_FL_KPROBE)
2437 			err = bpf_get_kprobe_info(event, fd_type, buf,
2438 						  probe_offset, probe_addr, missed,
2439 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2440 #endif
2441 #ifdef CONFIG_UPROBE_EVENTS
2442 		if (flags & TRACE_EVENT_FL_UPROBE)
2443 			err = bpf_get_uprobe_info(event, fd_type, buf,
2444 						  probe_offset, probe_addr,
2445 						  event->attr.type == PERF_TYPE_TRACEPOINT);
2446 #endif
2447 	}
2448 
2449 	return err;
2450 }
2451 
send_signal_irq_work_init(void)2452 static int __init send_signal_irq_work_init(void)
2453 {
2454 	int cpu;
2455 	struct send_signal_irq_work *work;
2456 
2457 	for_each_possible_cpu(cpu) {
2458 		work = per_cpu_ptr(&send_signal_work, cpu);
2459 		init_irq_work(&work->irq_work, do_bpf_send_signal);
2460 	}
2461 	return 0;
2462 }
2463 
2464 subsys_initcall(send_signal_irq_work_init);
2465 
2466 #ifdef CONFIG_MODULES
bpf_event_notify(struct notifier_block * nb,unsigned long op,void * module)2467 static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
2468 			    void *module)
2469 {
2470 	struct bpf_trace_module *btm, *tmp;
2471 	struct module *mod = module;
2472 	int ret = 0;
2473 
2474 	if (mod->num_bpf_raw_events == 0 ||
2475 	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
2476 		goto out;
2477 
2478 	mutex_lock(&bpf_module_mutex);
2479 
2480 	switch (op) {
2481 	case MODULE_STATE_COMING:
2482 		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
2483 		if (btm) {
2484 			btm->module = module;
2485 			list_add(&btm->list, &bpf_trace_modules);
2486 		} else {
2487 			ret = -ENOMEM;
2488 		}
2489 		break;
2490 	case MODULE_STATE_GOING:
2491 		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
2492 			if (btm->module == module) {
2493 				list_del(&btm->list);
2494 				kfree(btm);
2495 				break;
2496 			}
2497 		}
2498 		break;
2499 	}
2500 
2501 	mutex_unlock(&bpf_module_mutex);
2502 
2503 out:
2504 	return notifier_from_errno(ret);
2505 }
2506 
2507 static struct notifier_block bpf_module_nb = {
2508 	.notifier_call = bpf_event_notify,
2509 };
2510 
bpf_event_init(void)2511 static int __init bpf_event_init(void)
2512 {
2513 	register_module_notifier(&bpf_module_nb);
2514 	return 0;
2515 }
2516 
2517 fs_initcall(bpf_event_init);
2518 #endif /* CONFIG_MODULES */
2519 
2520 #ifdef CONFIG_FPROBE
2521 struct bpf_kprobe_multi_link {
2522 	struct bpf_link link;
2523 	struct fprobe fp;
2524 	unsigned long *addrs;
2525 	u64 *cookies;
2526 	u32 cnt;
2527 	u32 mods_cnt;
2528 	struct module **mods;
2529 	u32 flags;
2530 };
2531 
2532 struct bpf_kprobe_multi_run_ctx {
2533 	struct bpf_run_ctx run_ctx;
2534 	struct bpf_kprobe_multi_link *link;
2535 	unsigned long entry_ip;
2536 };
2537 
2538 struct user_syms {
2539 	const char **syms;
2540 	char *buf;
2541 };
2542 
copy_user_syms(struct user_syms * us,unsigned long __user * usyms,u32 cnt)2543 static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
2544 {
2545 	unsigned long __user usymbol;
2546 	const char **syms = NULL;
2547 	char *buf = NULL, *p;
2548 	int err = -ENOMEM;
2549 	unsigned int i;
2550 
2551 	syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
2552 	if (!syms)
2553 		goto error;
2554 
2555 	buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
2556 	if (!buf)
2557 		goto error;
2558 
2559 	for (p = buf, i = 0; i < cnt; i++) {
2560 		if (__get_user(usymbol, usyms + i)) {
2561 			err = -EFAULT;
2562 			goto error;
2563 		}
2564 		err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN);
2565 		if (err == KSYM_NAME_LEN)
2566 			err = -E2BIG;
2567 		if (err < 0)
2568 			goto error;
2569 		syms[i] = p;
2570 		p += err + 1;
2571 	}
2572 
2573 	us->syms = syms;
2574 	us->buf = buf;
2575 	return 0;
2576 
2577 error:
2578 	if (err) {
2579 		kvfree(syms);
2580 		kvfree(buf);
2581 	}
2582 	return err;
2583 }
2584 
kprobe_multi_put_modules(struct module ** mods,u32 cnt)2585 static void kprobe_multi_put_modules(struct module **mods, u32 cnt)
2586 {
2587 	u32 i;
2588 
2589 	for (i = 0; i < cnt; i++)
2590 		module_put(mods[i]);
2591 }
2592 
free_user_syms(struct user_syms * us)2593 static void free_user_syms(struct user_syms *us)
2594 {
2595 	kvfree(us->syms);
2596 	kvfree(us->buf);
2597 }
2598 
bpf_kprobe_multi_link_release(struct bpf_link * link)2599 static void bpf_kprobe_multi_link_release(struct bpf_link *link)
2600 {
2601 	struct bpf_kprobe_multi_link *kmulti_link;
2602 
2603 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2604 	unregister_fprobe(&kmulti_link->fp);
2605 	kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt);
2606 }
2607 
bpf_kprobe_multi_link_dealloc(struct bpf_link * link)2608 static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link)
2609 {
2610 	struct bpf_kprobe_multi_link *kmulti_link;
2611 
2612 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2613 	kvfree(kmulti_link->addrs);
2614 	kvfree(kmulti_link->cookies);
2615 	kfree(kmulti_link->mods);
2616 	kfree(kmulti_link);
2617 }
2618 
bpf_kprobe_multi_link_fill_link_info(const struct bpf_link * link,struct bpf_link_info * info)2619 static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
2620 						struct bpf_link_info *info)
2621 {
2622 	u64 __user *uaddrs = u64_to_user_ptr(info->kprobe_multi.addrs);
2623 	struct bpf_kprobe_multi_link *kmulti_link;
2624 	u32 ucount = info->kprobe_multi.count;
2625 	int err = 0, i;
2626 
2627 	if (!uaddrs ^ !ucount)
2628 		return -EINVAL;
2629 
2630 	kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
2631 	info->kprobe_multi.count = kmulti_link->cnt;
2632 	info->kprobe_multi.flags = kmulti_link->flags;
2633 
2634 	if (!uaddrs)
2635 		return 0;
2636 	if (ucount < kmulti_link->cnt)
2637 		err = -ENOSPC;
2638 	else
2639 		ucount = kmulti_link->cnt;
2640 
2641 	if (kallsyms_show_value(current_cred())) {
2642 		if (copy_to_user(uaddrs, kmulti_link->addrs, ucount * sizeof(u64)))
2643 			return -EFAULT;
2644 	} else {
2645 		for (i = 0; i < ucount; i++) {
2646 			if (put_user(0, uaddrs + i))
2647 				return -EFAULT;
2648 		}
2649 	}
2650 	return err;
2651 }
2652 
2653 static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
2654 	.release = bpf_kprobe_multi_link_release,
2655 	.dealloc_deferred = bpf_kprobe_multi_link_dealloc,
2656 	.fill_link_info = bpf_kprobe_multi_link_fill_link_info,
2657 };
2658 
bpf_kprobe_multi_cookie_swap(void * a,void * b,int size,const void * priv)2659 static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv)
2660 {
2661 	const struct bpf_kprobe_multi_link *link = priv;
2662 	unsigned long *addr_a = a, *addr_b = b;
2663 	u64 *cookie_a, *cookie_b;
2664 
2665 	cookie_a = link->cookies + (addr_a - link->addrs);
2666 	cookie_b = link->cookies + (addr_b - link->addrs);
2667 
2668 	/* swap addr_a/addr_b and cookie_a/cookie_b values */
2669 	swap(*addr_a, *addr_b);
2670 	swap(*cookie_a, *cookie_b);
2671 }
2672 
bpf_kprobe_multi_addrs_cmp(const void * a,const void * b)2673 static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b)
2674 {
2675 	const unsigned long *addr_a = a, *addr_b = b;
2676 
2677 	if (*addr_a == *addr_b)
2678 		return 0;
2679 	return *addr_a < *addr_b ? -1 : 1;
2680 }
2681 
bpf_kprobe_multi_cookie_cmp(const void * a,const void * b,const void * priv)2682 static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv)
2683 {
2684 	return bpf_kprobe_multi_addrs_cmp(a, b);
2685 }
2686 
bpf_kprobe_multi_cookie(struct bpf_run_ctx * ctx)2687 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
2688 {
2689 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2690 	struct bpf_kprobe_multi_link *link;
2691 	u64 *cookie, entry_ip;
2692 	unsigned long *addr;
2693 
2694 	if (WARN_ON_ONCE(!ctx))
2695 		return 0;
2696 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2697 	link = run_ctx->link;
2698 	if (!link->cookies)
2699 		return 0;
2700 	entry_ip = run_ctx->entry_ip;
2701 	addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip),
2702 		       bpf_kprobe_multi_addrs_cmp);
2703 	if (!addr)
2704 		return 0;
2705 	cookie = link->cookies + (addr - link->addrs);
2706 	return *cookie;
2707 }
2708 
bpf_kprobe_multi_entry_ip(struct bpf_run_ctx * ctx)2709 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
2710 {
2711 	struct bpf_kprobe_multi_run_ctx *run_ctx;
2712 
2713 	run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx);
2714 	return run_ctx->entry_ip;
2715 }
2716 
2717 static int
kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link * link,unsigned long entry_ip,struct pt_regs * regs)2718 kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
2719 			   unsigned long entry_ip, struct pt_regs *regs)
2720 {
2721 	struct bpf_kprobe_multi_run_ctx run_ctx = {
2722 		.link = link,
2723 		.entry_ip = entry_ip,
2724 	};
2725 	struct bpf_run_ctx *old_run_ctx;
2726 	int err;
2727 
2728 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
2729 		err = 0;
2730 		goto out;
2731 	}
2732 
2733 	migrate_disable();
2734 	rcu_read_lock();
2735 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2736 	err = bpf_prog_run(link->link.prog, regs);
2737 	bpf_reset_run_ctx(old_run_ctx);
2738 	rcu_read_unlock();
2739 	migrate_enable();
2740 
2741  out:
2742 	__this_cpu_dec(bpf_prog_active);
2743 	return err;
2744 }
2745 
2746 static int
kprobe_multi_link_handler(struct fprobe * fp,unsigned long fentry_ip,unsigned long ret_ip,struct pt_regs * regs,void * data)2747 kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
2748 			  unsigned long ret_ip, struct pt_regs *regs,
2749 			  void *data)
2750 {
2751 	struct bpf_kprobe_multi_link *link;
2752 
2753 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2754 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2755 	return 0;
2756 }
2757 
2758 static void
kprobe_multi_link_exit_handler(struct fprobe * fp,unsigned long fentry_ip,unsigned long ret_ip,struct pt_regs * regs,void * data)2759 kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
2760 			       unsigned long ret_ip, struct pt_regs *regs,
2761 			       void *data)
2762 {
2763 	struct bpf_kprobe_multi_link *link;
2764 
2765 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
2766 	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
2767 }
2768 
symbols_cmp_r(const void * a,const void * b,const void * priv)2769 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
2770 {
2771 	const char **str_a = (const char **) a;
2772 	const char **str_b = (const char **) b;
2773 
2774 	return strcmp(*str_a, *str_b);
2775 }
2776 
2777 struct multi_symbols_sort {
2778 	const char **funcs;
2779 	u64 *cookies;
2780 };
2781 
symbols_swap_r(void * a,void * b,int size,const void * priv)2782 static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2783 {
2784 	const struct multi_symbols_sort *data = priv;
2785 	const char **name_a = a, **name_b = b;
2786 
2787 	swap(*name_a, *name_b);
2788 
2789 	/* If defined, swap also related cookies. */
2790 	if (data->cookies) {
2791 		u64 *cookie_a, *cookie_b;
2792 
2793 		cookie_a = data->cookies + (name_a - data->funcs);
2794 		cookie_b = data->cookies + (name_b - data->funcs);
2795 		swap(*cookie_a, *cookie_b);
2796 	}
2797 }
2798 
2799 struct modules_array {
2800 	struct module **mods;
2801 	int mods_cnt;
2802 	int mods_cap;
2803 };
2804 
add_module(struct modules_array * arr,struct module * mod)2805 static int add_module(struct modules_array *arr, struct module *mod)
2806 {
2807 	struct module **mods;
2808 
2809 	if (arr->mods_cnt == arr->mods_cap) {
2810 		arr->mods_cap = max(16, arr->mods_cap * 3 / 2);
2811 		mods = krealloc_array(arr->mods, arr->mods_cap, sizeof(*mods), GFP_KERNEL);
2812 		if (!mods)
2813 			return -ENOMEM;
2814 		arr->mods = mods;
2815 	}
2816 
2817 	arr->mods[arr->mods_cnt] = mod;
2818 	arr->mods_cnt++;
2819 	return 0;
2820 }
2821 
has_module(struct modules_array * arr,struct module * mod)2822 static bool has_module(struct modules_array *arr, struct module *mod)
2823 {
2824 	int i;
2825 
2826 	for (i = arr->mods_cnt - 1; i >= 0; i--) {
2827 		if (arr->mods[i] == mod)
2828 			return true;
2829 	}
2830 	return false;
2831 }
2832 
get_modules_for_addrs(struct module *** mods,unsigned long * addrs,u32 addrs_cnt)2833 static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt)
2834 {
2835 	struct modules_array arr = {};
2836 	u32 i, err = 0;
2837 
2838 	for (i = 0; i < addrs_cnt; i++) {
2839 		struct module *mod;
2840 
2841 		preempt_disable();
2842 		mod = __module_address(addrs[i]);
2843 		/* Either no module or we it's already stored  */
2844 		if (!mod || has_module(&arr, mod)) {
2845 			preempt_enable();
2846 			continue;
2847 		}
2848 		if (!try_module_get(mod))
2849 			err = -EINVAL;
2850 		preempt_enable();
2851 		if (err)
2852 			break;
2853 		err = add_module(&arr, mod);
2854 		if (err) {
2855 			module_put(mod);
2856 			break;
2857 		}
2858 	}
2859 
2860 	/* We return either err < 0 in case of error, ... */
2861 	if (err) {
2862 		kprobe_multi_put_modules(arr.mods, arr.mods_cnt);
2863 		kfree(arr.mods);
2864 		return err;
2865 	}
2866 
2867 	/* or number of modules found if everything is ok. */
2868 	*mods = arr.mods;
2869 	return arr.mods_cnt;
2870 }
2871 
addrs_check_error_injection_list(unsigned long * addrs,u32 cnt)2872 static int addrs_check_error_injection_list(unsigned long *addrs, u32 cnt)
2873 {
2874 	u32 i;
2875 
2876 	for (i = 0; i < cnt; i++) {
2877 		if (!within_error_injection_list(addrs[i]))
2878 			return -EINVAL;
2879 	}
2880 	return 0;
2881 }
2882 
bpf_kprobe_multi_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)2883 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
2884 {
2885 	struct bpf_kprobe_multi_link *link = NULL;
2886 	struct bpf_link_primer link_primer;
2887 	void __user *ucookies;
2888 	unsigned long *addrs;
2889 	u32 flags, cnt, size;
2890 	void __user *uaddrs;
2891 	u64 *cookies = NULL;
2892 	void __user *usyms;
2893 	int err;
2894 
2895 	/* no support for 32bit archs yet */
2896 	if (sizeof(u64) != sizeof(void *))
2897 		return -EOPNOTSUPP;
2898 
2899 	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
2900 		return -EINVAL;
2901 
2902 	flags = attr->link_create.kprobe_multi.flags;
2903 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
2904 		return -EINVAL;
2905 
2906 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
2907 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
2908 	if (!!uaddrs == !!usyms)
2909 		return -EINVAL;
2910 
2911 	cnt = attr->link_create.kprobe_multi.cnt;
2912 	if (!cnt)
2913 		return -EINVAL;
2914 	if (cnt > MAX_KPROBE_MULTI_CNT)
2915 		return -E2BIG;
2916 
2917 	size = cnt * sizeof(*addrs);
2918 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2919 	if (!addrs)
2920 		return -ENOMEM;
2921 
2922 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2923 	if (ucookies) {
2924 		cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
2925 		if (!cookies) {
2926 			err = -ENOMEM;
2927 			goto error;
2928 		}
2929 		if (copy_from_user(cookies, ucookies, size)) {
2930 			err = -EFAULT;
2931 			goto error;
2932 		}
2933 	}
2934 
2935 	if (uaddrs) {
2936 		if (copy_from_user(addrs, uaddrs, size)) {
2937 			err = -EFAULT;
2938 			goto error;
2939 		}
2940 	} else {
2941 		struct multi_symbols_sort data = {
2942 			.cookies = cookies,
2943 		};
2944 		struct user_syms us;
2945 
2946 		err = copy_user_syms(&us, usyms, cnt);
2947 		if (err)
2948 			goto error;
2949 
2950 		if (cookies)
2951 			data.funcs = us.syms;
2952 
2953 		sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
2954 		       symbols_swap_r, &data);
2955 
2956 		err = ftrace_lookup_symbols(us.syms, cnt, addrs);
2957 		free_user_syms(&us);
2958 		if (err)
2959 			goto error;
2960 	}
2961 
2962 	if (prog->kprobe_override && addrs_check_error_injection_list(addrs, cnt)) {
2963 		err = -EINVAL;
2964 		goto error;
2965 	}
2966 
2967 	link = kzalloc(sizeof(*link), GFP_KERNEL);
2968 	if (!link) {
2969 		err = -ENOMEM;
2970 		goto error;
2971 	}
2972 
2973 	bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI,
2974 		      &bpf_kprobe_multi_link_lops, prog);
2975 
2976 	err = bpf_link_prime(&link->link, &link_primer);
2977 	if (err)
2978 		goto error;
2979 
2980 	if (flags & BPF_F_KPROBE_MULTI_RETURN)
2981 		link->fp.exit_handler = kprobe_multi_link_exit_handler;
2982 	else
2983 		link->fp.entry_handler = kprobe_multi_link_handler;
2984 
2985 	link->addrs = addrs;
2986 	link->cookies = cookies;
2987 	link->cnt = cnt;
2988 	link->flags = flags;
2989 
2990 	if (cookies) {
2991 		/*
2992 		 * Sorting addresses will trigger sorting cookies as well
2993 		 * (check bpf_kprobe_multi_cookie_swap). This way we can
2994 		 * find cookie based on the address in bpf_get_attach_cookie
2995 		 * helper.
2996 		 */
2997 		sort_r(addrs, cnt, sizeof(*addrs),
2998 		       bpf_kprobe_multi_cookie_cmp,
2999 		       bpf_kprobe_multi_cookie_swap,
3000 		       link);
3001 	}
3002 
3003 	err = get_modules_for_addrs(&link->mods, addrs, cnt);
3004 	if (err < 0) {
3005 		bpf_link_cleanup(&link_primer);
3006 		return err;
3007 	}
3008 	link->mods_cnt = err;
3009 
3010 	err = register_fprobe_ips(&link->fp, addrs, cnt);
3011 	if (err) {
3012 		kprobe_multi_put_modules(link->mods, link->mods_cnt);
3013 		bpf_link_cleanup(&link_primer);
3014 		return err;
3015 	}
3016 
3017 	return bpf_link_settle(&link_primer);
3018 
3019 error:
3020 	kfree(link);
3021 	kvfree(addrs);
3022 	kvfree(cookies);
3023 	return err;
3024 }
3025 #else /* !CONFIG_FPROBE */
bpf_kprobe_multi_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)3026 int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3027 {
3028 	return -EOPNOTSUPP;
3029 }
bpf_kprobe_multi_cookie(struct bpf_run_ctx * ctx)3030 static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx)
3031 {
3032 	return 0;
3033 }
bpf_kprobe_multi_entry_ip(struct bpf_run_ctx * ctx)3034 static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3035 {
3036 	return 0;
3037 }
3038 #endif
3039 
3040 #ifdef CONFIG_UPROBES
3041 struct bpf_uprobe_multi_link;
3042 
3043 struct bpf_uprobe {
3044 	struct bpf_uprobe_multi_link *link;
3045 	loff_t offset;
3046 	unsigned long ref_ctr_offset;
3047 	u64 cookie;
3048 	struct uprobe_consumer consumer;
3049 };
3050 
3051 struct bpf_uprobe_multi_link {
3052 	struct path path;
3053 	struct bpf_link link;
3054 	u32 cnt;
3055 	struct bpf_uprobe *uprobes;
3056 	struct task_struct *task;
3057 };
3058 
3059 struct bpf_uprobe_multi_run_ctx {
3060 	struct bpf_run_ctx run_ctx;
3061 	unsigned long entry_ip;
3062 	struct bpf_uprobe *uprobe;
3063 };
3064 
bpf_uprobe_unregister(struct path * path,struct bpf_uprobe * uprobes,u32 cnt)3065 static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
3066 				  u32 cnt)
3067 {
3068 	u32 i;
3069 
3070 	for (i = 0; i < cnt; i++) {
3071 		uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
3072 				  &uprobes[i].consumer);
3073 	}
3074 }
3075 
bpf_uprobe_multi_link_release(struct bpf_link * link)3076 static void bpf_uprobe_multi_link_release(struct bpf_link *link)
3077 {
3078 	struct bpf_uprobe_multi_link *umulti_link;
3079 
3080 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3081 	bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
3082 	if (umulti_link->task)
3083 		put_task_struct(umulti_link->task);
3084 	path_put(&umulti_link->path);
3085 }
3086 
bpf_uprobe_multi_link_dealloc(struct bpf_link * link)3087 static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
3088 {
3089 	struct bpf_uprobe_multi_link *umulti_link;
3090 
3091 	umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
3092 	kvfree(umulti_link->uprobes);
3093 	kfree(umulti_link);
3094 }
3095 
3096 static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
3097 	.release = bpf_uprobe_multi_link_release,
3098 	.dealloc_deferred = bpf_uprobe_multi_link_dealloc,
3099 };
3100 
uprobe_prog_run(struct bpf_uprobe * uprobe,unsigned long entry_ip,struct pt_regs * regs)3101 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
3102 			   unsigned long entry_ip,
3103 			   struct pt_regs *regs)
3104 {
3105 	struct bpf_uprobe_multi_link *link = uprobe->link;
3106 	struct bpf_uprobe_multi_run_ctx run_ctx = {
3107 		.entry_ip = entry_ip,
3108 		.uprobe = uprobe,
3109 	};
3110 	struct bpf_prog *prog = link->link.prog;
3111 	bool sleepable = prog->aux->sleepable;
3112 	struct bpf_run_ctx *old_run_ctx;
3113 
3114 	if (link->task && current->mm != link->task->mm)
3115 		return 0;
3116 
3117 	if (sleepable)
3118 		rcu_read_lock_trace();
3119 	else
3120 		rcu_read_lock();
3121 
3122 	migrate_disable();
3123 
3124 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
3125 	bpf_prog_run(link->link.prog, regs);
3126 	bpf_reset_run_ctx(old_run_ctx);
3127 
3128 	migrate_enable();
3129 
3130 	if (sleepable)
3131 		rcu_read_unlock_trace();
3132 	else
3133 		rcu_read_unlock();
3134 	return 0;
3135 }
3136 
3137 static bool
uprobe_multi_link_filter(struct uprobe_consumer * con,enum uprobe_filter_ctx ctx,struct mm_struct * mm)3138 uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
3139 			 struct mm_struct *mm)
3140 {
3141 	struct bpf_uprobe *uprobe;
3142 
3143 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3144 	return uprobe->link->task->mm == mm;
3145 }
3146 
3147 static int
uprobe_multi_link_handler(struct uprobe_consumer * con,struct pt_regs * regs)3148 uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
3149 {
3150 	struct bpf_uprobe *uprobe;
3151 
3152 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3153 	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
3154 }
3155 
3156 static int
uprobe_multi_link_ret_handler(struct uprobe_consumer * con,unsigned long func,struct pt_regs * regs)3157 uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
3158 {
3159 	struct bpf_uprobe *uprobe;
3160 
3161 	uprobe = container_of(con, struct bpf_uprobe, consumer);
3162 	return uprobe_prog_run(uprobe, func, regs);
3163 }
3164 
bpf_uprobe_multi_entry_ip(struct bpf_run_ctx * ctx)3165 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3166 {
3167 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3168 
3169 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3170 	return run_ctx->entry_ip;
3171 }
3172 
bpf_uprobe_multi_cookie(struct bpf_run_ctx * ctx)3173 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3174 {
3175 	struct bpf_uprobe_multi_run_ctx *run_ctx;
3176 
3177 	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
3178 	return run_ctx->uprobe->cookie;
3179 }
3180 
bpf_uprobe_multi_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)3181 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3182 {
3183 	struct bpf_uprobe_multi_link *link = NULL;
3184 	unsigned long __user *uref_ctr_offsets;
3185 	struct bpf_link_primer link_primer;
3186 	struct bpf_uprobe *uprobes = NULL;
3187 	struct task_struct *task = NULL;
3188 	unsigned long __user *uoffsets;
3189 	u64 __user *ucookies;
3190 	void __user *upath;
3191 	u32 flags, cnt, i;
3192 	struct path path;
3193 	char *name;
3194 	pid_t pid;
3195 	int err;
3196 
3197 	/* no support for 32bit archs yet */
3198 	if (sizeof(u64) != sizeof(void *))
3199 		return -EOPNOTSUPP;
3200 
3201 	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
3202 		return -EINVAL;
3203 
3204 	flags = attr->link_create.uprobe_multi.flags;
3205 	if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
3206 		return -EINVAL;
3207 
3208 	/*
3209 	 * path, offsets and cnt are mandatory,
3210 	 * ref_ctr_offsets and cookies are optional
3211 	 */
3212 	upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
3213 	uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
3214 	cnt = attr->link_create.uprobe_multi.cnt;
3215 	pid = attr->link_create.uprobe_multi.pid;
3216 
3217 	if (!upath || !uoffsets || !cnt || pid < 0)
3218 		return -EINVAL;
3219 	if (cnt > MAX_UPROBE_MULTI_CNT)
3220 		return -E2BIG;
3221 
3222 	uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
3223 	ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
3224 
3225 	name = strndup_user(upath, PATH_MAX);
3226 	if (IS_ERR(name)) {
3227 		err = PTR_ERR(name);
3228 		return err;
3229 	}
3230 
3231 	err = kern_path(name, LOOKUP_FOLLOW, &path);
3232 	kfree(name);
3233 	if (err)
3234 		return err;
3235 
3236 	if (!d_is_reg(path.dentry)) {
3237 		err = -EBADF;
3238 		goto error_path_put;
3239 	}
3240 
3241 	if (pid) {
3242 		rcu_read_lock();
3243 		task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
3244 		rcu_read_unlock();
3245 		if (!task) {
3246 			err = -ESRCH;
3247 			goto error_path_put;
3248 		}
3249 	}
3250 
3251 	err = -ENOMEM;
3252 
3253 	link = kzalloc(sizeof(*link), GFP_KERNEL);
3254 	uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
3255 
3256 	if (!uprobes || !link)
3257 		goto error_free;
3258 
3259 	for (i = 0; i < cnt; i++) {
3260 		if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
3261 			err = -EFAULT;
3262 			goto error_free;
3263 		}
3264 		if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
3265 			err = -EFAULT;
3266 			goto error_free;
3267 		}
3268 		if (__get_user(uprobes[i].offset, uoffsets + i)) {
3269 			err = -EFAULT;
3270 			goto error_free;
3271 		}
3272 
3273 		uprobes[i].link = link;
3274 
3275 		if (flags & BPF_F_UPROBE_MULTI_RETURN)
3276 			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
3277 		else
3278 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
3279 
3280 		if (pid)
3281 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
3282 	}
3283 
3284 	link->cnt = cnt;
3285 	link->uprobes = uprobes;
3286 	link->path = path;
3287 	link->task = task;
3288 
3289 	bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
3290 		      &bpf_uprobe_multi_link_lops, prog);
3291 
3292 	for (i = 0; i < cnt; i++) {
3293 		err = uprobe_register_refctr(d_real_inode(link->path.dentry),
3294 					     uprobes[i].offset,
3295 					     uprobes[i].ref_ctr_offset,
3296 					     &uprobes[i].consumer);
3297 		if (err) {
3298 			link->cnt = i;
3299 			goto error_unregister;
3300 		}
3301 	}
3302 
3303 	err = bpf_link_prime(&link->link, &link_primer);
3304 	if (err)
3305 		goto error_unregister;
3306 
3307 	return bpf_link_settle(&link_primer);
3308 
3309 error_unregister:
3310 	bpf_uprobe_unregister(&path, uprobes, link->cnt);
3311 
3312 error_free:
3313 	kvfree(uprobes);
3314 	kfree(link);
3315 	if (task)
3316 		put_task_struct(task);
3317 error_path_put:
3318 	path_put(&path);
3319 	return err;
3320 }
3321 #else /* !CONFIG_UPROBES */
bpf_uprobe_multi_link_attach(const union bpf_attr * attr,struct bpf_prog * prog)3322 int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
3323 {
3324 	return -EOPNOTSUPP;
3325 }
bpf_uprobe_multi_cookie(struct bpf_run_ctx * ctx)3326 static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
3327 {
3328 	return 0;
3329 }
bpf_uprobe_multi_entry_ip(struct bpf_run_ctx * ctx)3330 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
3331 {
3332 	return 0;
3333 }
3334 #endif /* CONFIG_UPROBES */
3335