// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <linux/fprobe.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>

#include <net/bpf_sk_storage.h>

#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include "bpf_trace.h"

#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id);
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	cant_sleep();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * Since some BPF program is already running on this CPU,
		 * don't call into another BPF program (same or different)
		 * and don't send a kprobe event into the ring buffer,
		 * so return zero here.
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the prog_array fetched by bpf_prog_array_valid() was non-NULL,
	 * we enter trace_call_bpf() and do the proper rcu_dereference()
	 * under the RCU lock. If it turns out that prog_array is NULL,
	 * we bail out.
	 * Conversely, if the pointer fetched by bpf_prog_array_valid() was
	 * NULL, the prog_array is skipped, at the risk of missing events
	 * if it was updated between that check and the rcu_dereference();
	 * this is an accepted risk.
	 */
	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(call->prog_array),
				 ctx, bpf_prog_run);
	rcu_read_unlock();

out:
	__this_cpu_dec(bpf_prog_active);

	return ret;
}
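
/*
 * Illustrative (hypothetical) BPF-side counterpart of the return-value
 * contract documented above; the program and should_trace() are
 * placeholders, not part of this file's API:
 *
 *	SEC("kprobe/do_sys_openat2")
 *	int trace_open(struct pt_regs *ctx)
 *	{
 *		return should_trace() ? 1 : 0;	// 1: emit event, 0: filter out
 *	}
 */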

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif

static __always_inline int
bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
	int ret;

	ret = copy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_proto = {
	.func		= bpf_probe_read_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_user_str_common(void *dst, u32 size,
			       const void __user *unsafe_ptr)
{
	int ret;

	/*
	 * NB: We rely on strncpy_from_user() not copying junk past the NUL
	 * terminator into `dst`.
	 *
	 * strncpy_from_user() does long-sized strides in the fast path. If the
	 * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`,
	 * then there could be junk after the NUL in `dst`. If the user takes
	 * `dst` and keys a hash map with it, then semantically identical
	 * strings can occupy multiple entries in the map.
	 */
	ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}
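
/*
 * Illustrative BPF-side use of the helper below; user_ptr is a
 * placeholder. On success the return value is the length of the copied
 * string, including the trailing NUL:
 *
 *	char comm[16];
 *	long n = bpf_probe_read_user_str(comm, sizeof(comm), user_ptr);
 *	if (n > 0)
 *		;	// comm holds n bytes, NUL included
 */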

BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
	   const void __user *, unsafe_ptr)
{
	return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_user_str_proto = {
	.func		= bpf_probe_read_user_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_proto = {
	.func		= bpf_probe_read_kernel,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

static __always_inline int
bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_kernel_nofault() call will likely not fill the
	 * entire buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read_*() and might
	 * as well probe the stack. Thus, memory is explicitly cleared only
	 * in the error case, so that improper users ignoring the return
	 * code altogether don't copy garbage; otherwise the length of the
	 * string is returned, which can be used for bpf_perf_event_output()
	 * et al.
	 */
	ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}

BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
	.func		= bpf_probe_read_kernel_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
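
/*
 * The legacy bpf_probe_read{,_str}() compat helpers below guess the
 * address space from the pointer value and exist only on architectures
 * where user and kernel addresses cannot overlap; new BPF code is
 * expected to use the explicit bpf_probe_read_user*() and
 * bpf_probe_read_kernel*() variants above instead.
 */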

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_proto = {
	.func		= bpf_probe_read_compat,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	if ((unsigned long)unsafe_ptr < TASK_SIZE) {
		return bpf_probe_read_user_str_common(dst, size,
				(__force void __user *)unsafe_ptr);
	}
	return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}

static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
	.func		= bpf_probe_read_compat_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return copy_to_user_nofault(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
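
/*
 * Illustrative (hypothetical) BPF-side use of bpf_probe_write_user(),
 * e.g. overwriting a user buffer captured from a syscall argument;
 * user_ptr is a placeholder:
 *
 *	u32 zero = 0;
 *	bpf_probe_write_user(user_ptr, &zero, sizeof(zero));
 *
 * Loading any program that references this helper triggers the
 * pr_warn_ratelimited() above.
 */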

static DEFINE_RAW_SPINLOCK(trace_printk_lock);

#define MAX_TRACE_PRINTK_VARARGS	3
#define BPF_TRACE_PRINTK_SIZE		1024

BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 };
	u32 *bin_args;
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret;

	ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args,
				  MAX_TRACE_PRINTK_VARARGS);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
};

static void __set_printk_clr_event(void)
{
	/*
	 * This program might be calling bpf_trace_printk,
	 * so enable the associated bpf_trace/bpf_trace_printk event.
	 * Repeat this each time as it is possible a user has
	 * disabled bpf_trace_printk events. However, by loading a
	 * program that calls bpf_trace_printk(), the user has expressed
	 * the intent to see such events.
	 */
	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
		pr_warn_ratelimited("could not enable bpf_trace_printk events");
}

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_printk_proto;
}
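
/*
 * Sketch of typical BPF-side usage; libbpf's bpf_printk() macro expands
 * to roughly this for up to three arguments:
 *
 *	char fmt[] = "pid %d\n";
 *	bpf_trace_printk(fmt, sizeof(fmt), pid);
 *
 * The output is emitted via the bpf_trace/bpf_trace_printk event
 * enabled by __set_printk_clr_event() above.
 */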

BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data,
	   u32, data_len)
{
	static char buf[BPF_TRACE_PRINTK_SIZE];
	unsigned long flags;
	int ret, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (ret < 0)
		return ret;

	raw_spin_lock_irqsave(&trace_printk_lock, flags);
	ret = bstr_printf(buf, sizeof(buf), fmt, bin_args);

	trace_bpf_trace_printk(buf);
	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);

	bpf_bprintf_cleanup();

	return ret;
}

static const struct bpf_func_proto bpf_trace_vprintk_proto = {
	.func		= bpf_trace_vprintk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void)
{
	__set_printk_clr_event();
	return &bpf_trace_vprintk_proto;
}
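
/*
 * Illustrative BPF-side usage of bpf_trace_vprintk(): the variadic
 * arguments are passed as an array of u64, so data_len must be a
 * multiple of 8 and at most MAX_BPRINTF_VARARGS * 8 bytes:
 *
 *	u64 args[] = { a, b, c, d };
 *	bpf_trace_vprintk(fmt, sizeof(fmt), args, sizeof(args));
 */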

BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
	   const void *, data, u32, data_len)
{
	int err, num_args;
	u32 *bin_args;

	if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 ||
	    (data_len && !data))
		return -EINVAL;
	num_args = data_len / 8;

	err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args);
	if (err < 0)
		return err;

	seq_bprintf(m, fmt, bin_args);

	bpf_bprintf_cleanup();

	return seq_has_overflowed(m) ? -EOVERFLOW : 0;
}

BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)

static const struct bpf_func_proto bpf_seq_printf_proto = {
	.func		= bpf_seq_printf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
{
	return seq_write(m, data, len) ? -EOVERFLOW : 0;
}

static const struct bpf_func_proto bpf_seq_write_proto = {
	.func		= bpf_seq_write,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
}

static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
	.func		= bpf_seq_printf_btf,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_seq_file_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
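
/*
 * The seq helpers above are meant for BPF iterator programs
 * (BPF_TRACE_ITER), which receive the seq_file via their context.
 * Illustrative (hypothetical) use from a task iterator:
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		u64 args[] = { (u64)ctx->task->pid };
 *		char fmt[] = "pid: %d\n";
 *
 *		bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
 *		return 0;
 *	}
 */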

static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
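
/*
 * Illustrative (hypothetical) BPF-side usage of the two read helpers
 * above, with a BPF_MAP_TYPE_PERF_EVENT_ARRAY map ("counters") whose
 * slots have been populated with perf event FDs from user space:
 *
 *	struct bpf_perf_event_value v;
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *	int err = bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *					    &v, sizeof(v));
 */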

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}
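
/*
 * Illustrative (hypothetical) BPF-side usage of bpf_perf_event_output(),
 * pushing a sample into a BPF_MAP_TYPE_PERF_EVENT_ARRAY map ("events"):
 *
 *	struct event e = { .pid = pid };	// hypothetical event struct
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &e, sizeof(e));
 */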

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};
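
/*
 * Illustrative BPF-side usage of bpf_get_current_task(): the return
 * value is the task_struct pointer cast to an integer, so CO-RE reads
 * are typically used to walk it:
 *
 *	struct task_struct *t = (void *)bpf_get_current_task();
 *	pid_t ppid = BPF_CORE_READ(t, real_parent, tgid);
 */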

BPF_CALL_0(bpf_get_current_task_btf)
{
	return (unsigned long) current;
}

const struct bpf_func_proto bpf_get_current_task_btf_proto = {
	.func		= bpf_get_current_task_btf,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};

BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
{
	return (unsigned long) task_pt_regs(task);
}

BTF_ID_LIST(bpf_task_pt_regs_ids)
BTF_ID(struct, pt_regs)

const struct bpf_func_proto bpf_task_pt_regs_proto = {
	.func		= bpf_task_pt_regs,
	.gpl_only	= true,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.ret_type	= RET_PTR_TO_BTF_ID,
	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
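
/*
 * Illustrative BPF-side usage of bpf_current_task_under_cgroup() with a
 * BPF_MAP_TYPE_CGROUP_ARRAY map ("cgrp_map") whose slot 0 was populated
 * from user space; a return of 1 means current runs inside that cgroup
 * hierarchy:
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
 *		;	// current task is in the target cgroup
 */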

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
	enum pid_type type;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
}

static int bpf_send_signal_common(u32 sig, enum pid_type type)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, the task needs to be in a
	 * sound condition and kernel memory access must be permitted
	 * in order to send a signal to the current task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (irq_work_is_busy(&work->irq_work))
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_send_signal_thread, u32, sig)
{
	return bpf_send_signal_common(sig, PIDTYPE_PID);
}

static const struct bpf_func_proto bpf_send_signal_thread_proto = {
	.func		= bpf_send_signal_thread,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
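
/*
 * Illustrative BPF-side usage of the two signal helpers above:
 *
 *	bpf_send_signal(SIGUSR1);		// signal the whole process (TGID)
 *	bpf_send_signal_thread(SIGUSR1);	// signal only the current thread
 */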

BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
{
	long len;
	char *p;

	if (!sz)
		return 0;

	p = d_path(path, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
		len = buf + sz - p;
		memmove(buf, p, len);
	}

	return len;
}

BTF_SET_START(btf_allowlist_d_path)
#ifdef CONFIG_SECURITY
BTF_ID(func, security_file_permission)
BTF_ID(func, security_inode_getattr)
BTF_ID(func, security_file_open)
#endif
#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, security_path_truncate)
#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
BTF_ID(func, vfs_getattr)
BTF_ID(func, filp_close)
BTF_SET_END(btf_allowlist_d_path)

static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_TRACING &&
	    prog->expected_attach_type == BPF_TRACE_ITER)
		return true;

	if (prog->type == BPF_PROG_TYPE_LSM)
		return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);

	return btf_id_set_contains(&btf_allowlist_d_path,
				   prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)

static const struct bpf_func_proto bpf_d_path_proto = {
	.func		= bpf_d_path,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_d_path_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.allowed	= bpf_d_path_allowed,
};
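
/*
 * Illustrative (hypothetical) BPF-side usage of bpf_d_path() from one
 * of the allowlisted hooks above, e.g. an fentry program on
 * filp_close(), where the struct file argument provides f_path:
 *
 *	char buf[256];
 *	long n = bpf_d_path(&filp->f_path, buf, sizeof(buf));
 *	// n > 0: buf holds the path; n < 0: error (e.g. buffer too small)
 */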

#define BTF_F_ALL	(BTF_F_COMPACT  | BTF_F_NONAME | \
			 BTF_F_PTR_RAW | BTF_F_ZERO)

static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
				  u64 flags, const struct btf **btf,
				  s32 *btf_id)
{
	const struct btf_type *t;

	if (unlikely(flags & ~(BTF_F_ALL)))
		return -EINVAL;

	if (btf_ptr_size != sizeof(struct btf_ptr))
		return -EINVAL;

	*btf = bpf_get_btf_vmlinux();

	if (IS_ERR_OR_NULL(*btf))
		return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL;

	if (ptr->type_id > 0)
		*btf_id = ptr->type_id;
	else
		return -EINVAL;

	if (*btf_id > 0)
		t = btf_type_by_id(*btf, *btf_id);
	if (*btf_id <= 0 || !t)
		return -ENOENT;

	return 0;
}

BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
	   u32, btf_ptr_size, u64, flags)
{
	const struct btf *btf;
	s32 btf_id;
	int ret;

	ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
	if (ret)
		return ret;

	return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
				      flags);
}

const struct bpf_func_proto bpf_snprintf_btf_proto = {
	.func		= bpf_snprintf_btf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};
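
/*
 * Illustrative (hypothetical) BPF-side usage of bpf_snprintf_btf(),
 * pretty-printing a kernel object by its BTF type id, obtained here via
 * libbpf's bpf_core_type_id_kernel():
 *
 *	static char out[256];
 *	struct btf_ptr p = {
 *		.ptr	 = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), 0);
 */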

BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-2];
}

static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.func		= bpf_get_func_ip_tracing,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

	return kp ? (uintptr_t)kp->addr : 0;
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
	.func		= bpf_get_func_ip_kprobe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_entry_ip(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = {
	.func		= bpf_get_func_ip_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs)
{
	return bpf_kprobe_multi_cookie(current->bpf_ctx);
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
	.func		= bpf_get_attach_cookie_kprobe_multi,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = {
	.func		= bpf_get_attach_cookie_trace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx)
{
	return ctx->event->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = {
	.func		= bpf_get_attach_cookie_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = {
	.func		= bpf_get_attach_cookie_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
{
#ifndef CONFIG_X86
	return -ENOENT;
#else
	static const u32 br_entry_size = sizeof(struct perf_branch_entry);
	u32 entry_cnt = size / br_entry_size;

	entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);

	if (unlikely(flags))
		return -EINVAL;

	if (!entry_cnt)
		return -ENOENT;

	return entry_cnt * br_entry_size;
#endif
}

static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
	.func		= bpf_get_branch_snapshot,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	if ((u64) n >= nr_args)
		return -EINVAL;
	*value = ((u64 *)ctx)[n];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_arg_proto = {
	.func		= get_func_arg,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
	/* This helper call is inlined by verifier. */
	u64 nr_args = ((u64 *)ctx)[-1];

	*value = ((u64 *)ctx)[nr_args];
	return 0;
}

static const struct bpf_func_proto bpf_get_func_ret_proto = {
	.func		= get_func_ret,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
	/* This helper call is inlined by verifier. */
	return ((u64 *)ctx)[-1];
}

static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
	.func		= get_func_arg_cnt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
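
/*
 * Layout assumed by the get_func_* helpers above for tracing trampoline
 * programs: ctx points at an array of u64 slots holding the traced
 * function's arguments; the trampoline additionally stores the argument
 * count at ctx[-1], the function IP at ctx[-2] (see
 * bpf_get_func_ip_tracing() above) and, for fexit programs, the return
 * value at ctx[nr_args].
 */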
*/ 1177f92c1e18SJiri Olsa return ((u64 *)ctx)[-1]; 1178f92c1e18SJiri Olsa } 1179f92c1e18SJiri Olsa 1180f92c1e18SJiri Olsa static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { 1181f92c1e18SJiri Olsa .func = get_func_arg_cnt, 1182f92c1e18SJiri Olsa .ret_type = RET_INTEGER, 1183f92c1e18SJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 1184f92c1e18SJiri Olsa }; 1185f92c1e18SJiri Olsa 1186f3cf4134SRoberto Sassu #ifdef CONFIG_KEYS 1187f3cf4134SRoberto Sassu __diag_push(); 1188f3cf4134SRoberto Sassu __diag_ignore_all("-Wmissing-prototypes", 1189f3cf4134SRoberto Sassu "kfuncs which will be used in BPF programs"); 1190f3cf4134SRoberto Sassu 1191f3cf4134SRoberto Sassu /** 1192f3cf4134SRoberto Sassu * bpf_lookup_user_key - lookup a key by its serial 1193f3cf4134SRoberto Sassu * @serial: key handle serial number 1194f3cf4134SRoberto Sassu * @flags: lookup-specific flags 1195f3cf4134SRoberto Sassu * 1196f3cf4134SRoberto Sassu * Search a key with a given *serial* and the provided *flags*. 1197f3cf4134SRoberto Sassu * If found, increment the reference count of the key by one, and 1198f3cf4134SRoberto Sassu * return it in the bpf_key structure. 1199f3cf4134SRoberto Sassu * 1200f3cf4134SRoberto Sassu * The bpf_key structure must be passed to bpf_key_put() when done 1201f3cf4134SRoberto Sassu * with it, so that the key reference count is decremented and the 1202f3cf4134SRoberto Sassu * bpf_key structure is freed. 1203f3cf4134SRoberto Sassu * 1204f3cf4134SRoberto Sassu * Permission checks are deferred to the time the key is used by 1205f3cf4134SRoberto Sassu * one of the available key-specific kfuncs. 1206f3cf4134SRoberto Sassu * 1207f3cf4134SRoberto Sassu * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested 1208f3cf4134SRoberto Sassu * special keyring (e.g. session keyring), if it doesn't yet exist. 1209f3cf4134SRoberto Sassu * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting 1210f3cf4134SRoberto Sassu * for the key construction, and to retrieve uninstantiated keys (keys 1211f3cf4134SRoberto Sassu * without data attached to them). 1212f3cf4134SRoberto Sassu * 1213f3cf4134SRoberto Sassu * Return: a bpf_key pointer with a valid key pointer if the key is found, a 1214f3cf4134SRoberto Sassu * NULL pointer otherwise. 1215f3cf4134SRoberto Sassu */ 1216f3cf4134SRoberto Sassu struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) 1217f3cf4134SRoberto Sassu { 1218f3cf4134SRoberto Sassu key_ref_t key_ref; 1219f3cf4134SRoberto Sassu struct bpf_key *bkey; 1220f3cf4134SRoberto Sassu 1221f3cf4134SRoberto Sassu if (flags & ~KEY_LOOKUP_ALL) 1222f3cf4134SRoberto Sassu return NULL; 1223f3cf4134SRoberto Sassu 1224f3cf4134SRoberto Sassu /* 1225f3cf4134SRoberto Sassu * Permission check is deferred until the key is used, as the 1226f3cf4134SRoberto Sassu * intent of the caller is unknown here. 
1227f3cf4134SRoberto Sassu */ 1228f3cf4134SRoberto Sassu key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK); 1229f3cf4134SRoberto Sassu if (IS_ERR(key_ref)) 1230f3cf4134SRoberto Sassu return NULL; 1231f3cf4134SRoberto Sassu 1232f3cf4134SRoberto Sassu bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); 1233f3cf4134SRoberto Sassu if (!bkey) { 1234f3cf4134SRoberto Sassu key_put(key_ref_to_ptr(key_ref)); 1235f3cf4134SRoberto Sassu return NULL; 1236f3cf4134SRoberto Sassu } 1237f3cf4134SRoberto Sassu 1238f3cf4134SRoberto Sassu bkey->key = key_ref_to_ptr(key_ref); 1239f3cf4134SRoberto Sassu bkey->has_ref = true; 1240f3cf4134SRoberto Sassu 1241f3cf4134SRoberto Sassu return bkey; 1242f3cf4134SRoberto Sassu } 1243f3cf4134SRoberto Sassu 1244f3cf4134SRoberto Sassu /** 1245f3cf4134SRoberto Sassu * bpf_lookup_system_key - lookup a key by a system-defined ID 1246f3cf4134SRoberto Sassu * @id: key ID 1247f3cf4134SRoberto Sassu * 1248f3cf4134SRoberto Sassu * Obtain a bpf_key structure with a key pointer set to the passed key ID. 1249f3cf4134SRoberto Sassu * The key pointer is marked as invalid, to prevent bpf_key_put() from 1250f3cf4134SRoberto Sassu * attempting to decrement the key reference count on that pointer. The key 1251f3cf4134SRoberto Sassu * pointer set in such a way is currently understood only by 1252f3cf4134SRoberto Sassu * verify_pkcs7_signature(). 1253f3cf4134SRoberto Sassu * 1254f3cf4134SRoberto Sassu * Set *id* to one of the values defined in include/linux/verification.h: 1255f3cf4134SRoberto Sassu * 0 for the primary keyring (immutable keyring of system keys); 1256f3cf4134SRoberto Sassu * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring 1257f3cf4134SRoberto Sassu * (where keys can be added only if they are vouched for by existing keys 1258f3cf4134SRoberto Sassu * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform 1259f3cf4134SRoberto Sassu * keyring (primarily used by the integrity subsystem to verify a kexec'ed 1260f3cf4134SRoberto Sassu * kernel image and, possibly, the initramfs signature). 1261f3cf4134SRoberto Sassu * 1262f3cf4134SRoberto Sassu * Return: a bpf_key pointer with an invalid key pointer set from the 1263f3cf4134SRoberto Sassu * pre-determined ID on success, a NULL pointer otherwise 1264f3cf4134SRoberto Sassu */ 1265f3cf4134SRoberto Sassu struct bpf_key *bpf_lookup_system_key(u64 id) 1266f3cf4134SRoberto Sassu { 1267f3cf4134SRoberto Sassu struct bpf_key *bkey; 1268f3cf4134SRoberto Sassu 1269f3cf4134SRoberto Sassu if (system_keyring_id_check(id) < 0) 1270f3cf4134SRoberto Sassu return NULL; 1271f3cf4134SRoberto Sassu 1272f3cf4134SRoberto Sassu bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); 1273f3cf4134SRoberto Sassu if (!bkey) 1274f3cf4134SRoberto Sassu return NULL; 1275f3cf4134SRoberto Sassu 1276f3cf4134SRoberto Sassu bkey->key = (struct key *)(unsigned long)id; 1277f3cf4134SRoberto Sassu bkey->has_ref = false; 1278f3cf4134SRoberto Sassu 1279f3cf4134SRoberto Sassu return bkey; 1280f3cf4134SRoberto Sassu } 1281f3cf4134SRoberto Sassu 1282f3cf4134SRoberto Sassu /** 1283f3cf4134SRoberto Sassu * bpf_key_put - decrement key reference count if key is valid and free bpf_key 1284f3cf4134SRoberto Sassu * @bkey: bpf_key structure 1285f3cf4134SRoberto Sassu * 1286f3cf4134SRoberto Sassu * Decrement the reference count of the key inside *bkey*, if the pointer 1287f3cf4134SRoberto Sassu * is valid, and free *bkey*.
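 *
 * A minimal usage sketch from a sleepable BPF program (illustrative
 * only; the serial value is hypothetical):
 *
 *	bkey = bpf_lookup_user_key(serial, 0);
 *	if (bkey) {
 *		... use the key, e.g. with bpf_verify_pkcs7_signature() ...
 *		bpf_key_put(bkey);
 *	}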
1288f3cf4134SRoberto Sassu */ 1289f3cf4134SRoberto Sassu void bpf_key_put(struct bpf_key *bkey) 1290f3cf4134SRoberto Sassu { 1291f3cf4134SRoberto Sassu if (bkey->has_ref) 1292f3cf4134SRoberto Sassu key_put(bkey->key); 1293f3cf4134SRoberto Sassu 1294f3cf4134SRoberto Sassu kfree(bkey); 1295f3cf4134SRoberto Sassu } 1296f3cf4134SRoberto Sassu 1297*865b0566SRoberto Sassu #ifdef CONFIG_SYSTEM_DATA_VERIFICATION 1298*865b0566SRoberto Sassu /** 1299*865b0566SRoberto Sassu * bpf_verify_pkcs7_signature - verify a PKCS#7 signature 1300*865b0566SRoberto Sassu * @data_ptr: data to verify 1301*865b0566SRoberto Sassu * @sig_ptr: signature of the data 1302*865b0566SRoberto Sassu * @trusted_keyring: keyring with keys trusted for signature verification 1303*865b0566SRoberto Sassu * 1304*865b0566SRoberto Sassu * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* 1305*865b0566SRoberto Sassu * with keys in a keyring referenced by *trusted_keyring*. 1306*865b0566SRoberto Sassu * 1307*865b0566SRoberto Sassu * Return: 0 on success, a negative value on error. 1308*865b0566SRoberto Sassu */ 1309*865b0566SRoberto Sassu int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, 1310*865b0566SRoberto Sassu struct bpf_dynptr_kern *sig_ptr, 1311*865b0566SRoberto Sassu struct bpf_key *trusted_keyring) 1312*865b0566SRoberto Sassu { 1313*865b0566SRoberto Sassu int ret; 1314*865b0566SRoberto Sassu 1315*865b0566SRoberto Sassu if (trusted_keyring->has_ref) { 1316*865b0566SRoberto Sassu /* 1317*865b0566SRoberto Sassu * Do the permission check deferred in bpf_lookup_user_key(). 1318*865b0566SRoberto Sassu * See bpf_lookup_user_key() for more details. 1319*865b0566SRoberto Sassu * 1320*865b0566SRoberto Sassu * A call to key_task_permission() here would be redundant, as 1321*865b0566SRoberto Sassu * it is already done by keyring_search() called by 1322*865b0566SRoberto Sassu * find_asymmetric_key(). 
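 *
 * key_validate() below still rejects keys that have been revoked,
 * invalidated or expired.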
1323*865b0566SRoberto Sassu */ 1324*865b0566SRoberto Sassu ret = key_validate(trusted_keyring->key); 1325*865b0566SRoberto Sassu if (ret < 0) 1326*865b0566SRoberto Sassu return ret; 1327*865b0566SRoberto Sassu } 1328*865b0566SRoberto Sassu 1329*865b0566SRoberto Sassu return verify_pkcs7_signature(data_ptr->data, 1330*865b0566SRoberto Sassu bpf_dynptr_get_size(data_ptr), 1331*865b0566SRoberto Sassu sig_ptr->data, 1332*865b0566SRoberto Sassu bpf_dynptr_get_size(sig_ptr), 1333*865b0566SRoberto Sassu trusted_keyring->key, 1334*865b0566SRoberto Sassu VERIFYING_UNSPECIFIED_SIGNATURE, NULL, 1335*865b0566SRoberto Sassu NULL); 1336*865b0566SRoberto Sassu } 1337*865b0566SRoberto Sassu #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ 1338*865b0566SRoberto Sassu 1339f3cf4134SRoberto Sassu __diag_pop(); 1340f3cf4134SRoberto Sassu 1341f3cf4134SRoberto Sassu BTF_SET8_START(key_sig_kfunc_set) 1342f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) 1343f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) 1344f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) 1345*865b0566SRoberto Sassu #ifdef CONFIG_SYSTEM_DATA_VERIFICATION 1346*865b0566SRoberto Sassu BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) 1347*865b0566SRoberto Sassu #endif 1348f3cf4134SRoberto Sassu BTF_SET8_END(key_sig_kfunc_set) 1349f3cf4134SRoberto Sassu 1350f3cf4134SRoberto Sassu static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { 1351f3cf4134SRoberto Sassu .owner = THIS_MODULE, 1352f3cf4134SRoberto Sassu .set = &key_sig_kfunc_set, 1353f3cf4134SRoberto Sassu }; 1354f3cf4134SRoberto Sassu 1355f3cf4134SRoberto Sassu static int __init bpf_key_sig_kfuncs_init(void) 1356f3cf4134SRoberto Sassu { 1357f3cf4134SRoberto Sassu return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 1358f3cf4134SRoberto Sassu &bpf_key_sig_kfunc_set); 1359f3cf4134SRoberto Sassu } 1360f3cf4134SRoberto Sassu 1361f3cf4134SRoberto Sassu late_initcall(bpf_key_sig_kfuncs_init); 1362f3cf4134SRoberto Sassu #endif /* CONFIG_KEYS */ 1363f3cf4134SRoberto Sassu 13647adfc6c9SAndrii Nakryiko static const struct bpf_func_proto * 1365fc611f47SKP Singh bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 13662541517cSAlexei Starovoitov { 13672541517cSAlexei Starovoitov switch (func_id) { 13682541517cSAlexei Starovoitov case BPF_FUNC_map_lookup_elem: 13692541517cSAlexei Starovoitov return &bpf_map_lookup_elem_proto; 13702541517cSAlexei Starovoitov case BPF_FUNC_map_update_elem: 13712541517cSAlexei Starovoitov return &bpf_map_update_elem_proto; 13722541517cSAlexei Starovoitov case BPF_FUNC_map_delete_elem: 13732541517cSAlexei Starovoitov return &bpf_map_delete_elem_proto; 137402a8c817SAlban Crequy case BPF_FUNC_map_push_elem: 137502a8c817SAlban Crequy return &bpf_map_push_elem_proto; 137602a8c817SAlban Crequy case BPF_FUNC_map_pop_elem: 137702a8c817SAlban Crequy return &bpf_map_pop_elem_proto; 137802a8c817SAlban Crequy case BPF_FUNC_map_peek_elem: 137902a8c817SAlban Crequy return &bpf_map_peek_elem_proto; 138007343110SFeng Zhou case BPF_FUNC_map_lookup_percpu_elem: 138107343110SFeng Zhou return &bpf_map_lookup_percpu_elem_proto; 1382d9847d31SAlexei Starovoitov case BPF_FUNC_ktime_get_ns: 1383d9847d31SAlexei Starovoitov return &bpf_ktime_get_ns_proto; 138471d19214SMaciej Żenczykowski case BPF_FUNC_ktime_get_boot_ns: 138571d19214SMaciej Żenczykowski return &bpf_ktime_get_boot_ns_proto; 138604fd61abSAlexei Starovoitov case BPF_FUNC_tail_call: 
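		/* bpf_tail_call() jumps to another program in a
		 * BPF_MAP_TYPE_PROG_ARRAY; on success it does not return
		 * to the calling program.
		 */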
138704fd61abSAlexei Starovoitov return &bpf_tail_call_proto; 1388ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_pid_tgid: 1389ffeedafbSAlexei Starovoitov return &bpf_get_current_pid_tgid_proto; 1390606274c5SAlexei Starovoitov case BPF_FUNC_get_current_task: 1391606274c5SAlexei Starovoitov return &bpf_get_current_task_proto; 13923ca1032aSKP Singh case BPF_FUNC_get_current_task_btf: 13933ca1032aSKP Singh return &bpf_get_current_task_btf_proto; 1394dd6e10fbSDaniel Xu case BPF_FUNC_task_pt_regs: 1395dd6e10fbSDaniel Xu return &bpf_task_pt_regs_proto; 1396ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_uid_gid: 1397ffeedafbSAlexei Starovoitov return &bpf_get_current_uid_gid_proto; 1398ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_comm: 1399ffeedafbSAlexei Starovoitov return &bpf_get_current_comm_proto; 14009c959c86SAlexei Starovoitov case BPF_FUNC_trace_printk: 14010756ea3eSAlexei Starovoitov return bpf_get_trace_printk_proto(); 1402ab1973d3SAlexei Starovoitov case BPF_FUNC_get_smp_processor_id: 1403ab1973d3SAlexei Starovoitov return &bpf_get_smp_processor_id_proto; 14042d0e30c3SDaniel Borkmann case BPF_FUNC_get_numa_node_id: 14052d0e30c3SDaniel Borkmann return &bpf_get_numa_node_id_proto; 140635578d79SKaixu Xia case BPF_FUNC_perf_event_read: 140735578d79SKaixu Xia return &bpf_perf_event_read_proto; 140860d20f91SSargun Dhillon case BPF_FUNC_current_task_under_cgroup: 140960d20f91SSargun Dhillon return &bpf_current_task_under_cgroup_proto; 14108937bd80SAlexei Starovoitov case BPF_FUNC_get_prandom_u32: 14118937bd80SAlexei Starovoitov return &bpf_get_prandom_u32_proto; 141251e1bb9eSDaniel Borkmann case BPF_FUNC_probe_write_user: 141351e1bb9eSDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? 141451e1bb9eSDaniel Borkmann NULL : bpf_get_probe_write_proto(); 14156ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_user: 14166ae08ae3SDaniel Borkmann return &bpf_probe_read_user_proto; 14176ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_kernel: 141871330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1419ff40e510SDaniel Borkmann NULL : &bpf_probe_read_kernel_proto; 14206ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_user_str: 14216ae08ae3SDaniel Borkmann return &bpf_probe_read_user_str_proto; 14226ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_kernel_str: 142371330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1424ff40e510SDaniel Borkmann NULL : &bpf_probe_read_kernel_str_proto; 14250ebeea8cSDaniel Borkmann #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 14260ebeea8cSDaniel Borkmann case BPF_FUNC_probe_read: 142771330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1428ff40e510SDaniel Borkmann NULL : &bpf_probe_read_compat_proto; 1429a5e8c070SGianluca Borello case BPF_FUNC_probe_read_str: 143071330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 
1431ff40e510SDaniel Borkmann NULL : &bpf_probe_read_compat_str_proto; 14320ebeea8cSDaniel Borkmann #endif 143334ea38caSYonghong Song #ifdef CONFIG_CGROUPS 1434bf6fa2c8SYonghong Song case BPF_FUNC_get_current_cgroup_id: 1435bf6fa2c8SYonghong Song return &bpf_get_current_cgroup_id_proto; 143695b861a7SNamhyung Kim case BPF_FUNC_get_current_ancestor_cgroup_id: 143795b861a7SNamhyung Kim return &bpf_get_current_ancestor_cgroup_id_proto; 143834ea38caSYonghong Song #endif 14398b401f9eSYonghong Song case BPF_FUNC_send_signal: 14408b401f9eSYonghong Song return &bpf_send_signal_proto; 14418482941fSYonghong Song case BPF_FUNC_send_signal_thread: 14428482941fSYonghong Song return &bpf_send_signal_thread_proto; 1443b80b033bSSong Liu case BPF_FUNC_perf_event_read_value: 1444b80b033bSSong Liu return &bpf_perf_event_read_value_proto; 1445b4490c5cSCarlos Neira case BPF_FUNC_get_ns_current_pid_tgid: 1446b4490c5cSCarlos Neira return &bpf_get_ns_current_pid_tgid_proto; 1447457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_output: 1448457f4436SAndrii Nakryiko return &bpf_ringbuf_output_proto; 1449457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_reserve: 1450457f4436SAndrii Nakryiko return &bpf_ringbuf_reserve_proto; 1451457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_submit: 1452457f4436SAndrii Nakryiko return &bpf_ringbuf_submit_proto; 1453457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_discard: 1454457f4436SAndrii Nakryiko return &bpf_ringbuf_discard_proto; 1455457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_query: 1456457f4436SAndrii Nakryiko return &bpf_ringbuf_query_proto; 145772e2b2b6SYonghong Song case BPF_FUNC_jiffies64: 145872e2b2b6SYonghong Song return &bpf_jiffies64_proto; 1459fa28dcb8SSong Liu case BPF_FUNC_get_task_stack: 1460fa28dcb8SSong Liu return &bpf_get_task_stack_proto; 146107be4c4aSAlexei Starovoitov case BPF_FUNC_copy_from_user: 146207be4c4aSAlexei Starovoitov return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1463376040e4SKenny Yu case BPF_FUNC_copy_from_user_task: 1464376040e4SKenny Yu return prog->aux->sleepable ? 
&bpf_copy_from_user_task_proto : NULL; 1465c4d0bfb4SAlan Maguire case BPF_FUNC_snprintf_btf: 1466c4d0bfb4SAlan Maguire return &bpf_snprintf_btf_proto; 1467b7906b70SAndrii Nakryiko case BPF_FUNC_per_cpu_ptr: 1468eaa6bcb7SHao Luo return &bpf_per_cpu_ptr_proto; 1469b7906b70SAndrii Nakryiko case BPF_FUNC_this_cpu_ptr: 147063d9b80dSHao Luo return &bpf_this_cpu_ptr_proto; 1471a10787e6SSong Liu case BPF_FUNC_task_storage_get: 1472a10787e6SSong Liu return &bpf_task_storage_get_proto; 1473a10787e6SSong Liu case BPF_FUNC_task_storage_delete: 1474a10787e6SSong Liu return &bpf_task_storage_delete_proto; 147569c087baSYonghong Song case BPF_FUNC_for_each_map_elem: 147669c087baSYonghong Song return &bpf_for_each_map_elem_proto; 14777b15523aSFlorent Revest case BPF_FUNC_snprintf: 14787b15523aSFlorent Revest return &bpf_snprintf_proto; 14799b99edcaSJiri Olsa case BPF_FUNC_get_func_ip: 14809b99edcaSJiri Olsa return &bpf_get_func_ip_proto_tracing; 1481856c02dbSSong Liu case BPF_FUNC_get_branch_snapshot: 1482856c02dbSSong Liu return &bpf_get_branch_snapshot_proto; 14837c7e3d31SSong Liu case BPF_FUNC_find_vma: 14847c7e3d31SSong Liu return &bpf_find_vma_proto; 148510aceb62SDave Marchevsky case BPF_FUNC_trace_vprintk: 148610aceb62SDave Marchevsky return bpf_get_trace_vprintk_proto(); 14879fd82b61SAlexei Starovoitov default: 1488b00628b1SAlexei Starovoitov return bpf_base_func_proto(func_id); 14899fd82b61SAlexei Starovoitov } 14909fd82b61SAlexei Starovoitov } 14919fd82b61SAlexei Starovoitov 14925e43f899SAndrey Ignatov static const struct bpf_func_proto * 14935e43f899SAndrey Ignatov kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 14949fd82b61SAlexei Starovoitov { 14959fd82b61SAlexei Starovoitov switch (func_id) { 1496a43eec30SAlexei Starovoitov case BPF_FUNC_perf_event_output: 1497a43eec30SAlexei Starovoitov return &bpf_perf_event_output_proto; 1498d5a3b1f6SAlexei Starovoitov case BPF_FUNC_get_stackid: 1499d5a3b1f6SAlexei Starovoitov return &bpf_get_stackid_proto; 1500c195651eSYonghong Song case BPF_FUNC_get_stack: 1501c195651eSYonghong Song return &bpf_get_stack_proto; 15029802d865SJosef Bacik #ifdef CONFIG_BPF_KPROBE_OVERRIDE 15039802d865SJosef Bacik case BPF_FUNC_override_return: 15049802d865SJosef Bacik return &bpf_override_return_proto; 15059802d865SJosef Bacik #endif 15069ffd9f3fSJiri Olsa case BPF_FUNC_get_func_ip: 150742a57120SJiri Olsa return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 150842a57120SJiri Olsa &bpf_get_func_ip_proto_kprobe_multi : 150942a57120SJiri Olsa &bpf_get_func_ip_proto_kprobe; 15107adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 1511ca74823cSJiri Olsa return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 
1512ca74823cSJiri Olsa &bpf_get_attach_cookie_proto_kmulti : 1513ca74823cSJiri Olsa &bpf_get_attach_cookie_proto_trace; 15142541517cSAlexei Starovoitov default: 1515fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 15162541517cSAlexei Starovoitov } 15172541517cSAlexei Starovoitov } 15182541517cSAlexei Starovoitov 15192541517cSAlexei Starovoitov /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 152019de99f7SAlexei Starovoitov static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 15215e43f899SAndrey Ignatov const struct bpf_prog *prog, 152223994631SYonghong Song struct bpf_insn_access_aux *info) 15232541517cSAlexei Starovoitov { 15242541517cSAlexei Starovoitov if (off < 0 || off >= sizeof(struct pt_regs)) 15252541517cSAlexei Starovoitov return false; 15262541517cSAlexei Starovoitov if (type != BPF_READ) 15272541517cSAlexei Starovoitov return false; 15282541517cSAlexei Starovoitov if (off % size != 0) 15292541517cSAlexei Starovoitov return false; 15302d071c64SDaniel Borkmann /* 15312d071c64SDaniel Borkmann * Assertion for 32 bit to make sure last 8 byte access 15322d071c64SDaniel Borkmann * (BPF_DW) to the last 4 byte member is disallowed. 15332d071c64SDaniel Borkmann */ 15342d071c64SDaniel Borkmann if (off + size > sizeof(struct pt_regs)) 15352d071c64SDaniel Borkmann return false; 15362d071c64SDaniel Borkmann 15372541517cSAlexei Starovoitov return true; 15382541517cSAlexei Starovoitov } 15392541517cSAlexei Starovoitov 15407de16e3aSJakub Kicinski const struct bpf_verifier_ops kprobe_verifier_ops = { 15412541517cSAlexei Starovoitov .get_func_proto = kprobe_prog_func_proto, 15422541517cSAlexei Starovoitov .is_valid_access = kprobe_prog_is_valid_access, 15432541517cSAlexei Starovoitov }; 15442541517cSAlexei Starovoitov 15457de16e3aSJakub Kicinski const struct bpf_prog_ops kprobe_prog_ops = { 15467de16e3aSJakub Kicinski }; 15477de16e3aSJakub Kicinski 1548f3694e00SDaniel Borkmann BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1549f3694e00SDaniel Borkmann u64, flags, void *, data, u64, size) 15509940d67cSAlexei Starovoitov { 1551f3694e00SDaniel Borkmann struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1552f3694e00SDaniel Borkmann 15539940d67cSAlexei Starovoitov /* 15549940d67cSAlexei Starovoitov * r1 points to perf tracepoint buffer where first 8 bytes are hidden 15559940d67cSAlexei Starovoitov * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1556f3694e00SDaniel Borkmann * from there and call the same bpf_perf_event_output() helper inline. 
15579940d67cSAlexei Starovoitov */ 1558f3694e00SDaniel Borkmann return ____bpf_perf_event_output(regs, map, flags, data, size); 15599940d67cSAlexei Starovoitov } 15609940d67cSAlexei Starovoitov 15619940d67cSAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 15629940d67cSAlexei Starovoitov .func = bpf_perf_event_output_tp, 15639940d67cSAlexei Starovoitov .gpl_only = true, 15649940d67cSAlexei Starovoitov .ret_type = RET_INTEGER, 15659940d67cSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 15669940d67cSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 15679940d67cSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1568216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1569a60dd35dSGianluca Borello .arg5_type = ARG_CONST_SIZE_OR_ZERO, 15709940d67cSAlexei Starovoitov }; 15719940d67cSAlexei Starovoitov 1572f3694e00SDaniel Borkmann BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1573f3694e00SDaniel Borkmann u64, flags) 15749940d67cSAlexei Starovoitov { 1575f3694e00SDaniel Borkmann struct pt_regs *regs = *(struct pt_regs **)tp_buff; 15769940d67cSAlexei Starovoitov 1577f3694e00SDaniel Borkmann /* 1578f3694e00SDaniel Borkmann * Same comment as in bpf_perf_event_output_tp(), only that this time 1579f3694e00SDaniel Borkmann * the other helper's function body cannot be inlined due to being 1580f3694e00SDaniel Borkmann * external, thus we need to call raw helper function. 1581f3694e00SDaniel Borkmann */ 1582f3694e00SDaniel Borkmann return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1583f3694e00SDaniel Borkmann flags, 0, 0); 15849940d67cSAlexei Starovoitov } 15859940d67cSAlexei Starovoitov 15869940d67cSAlexei Starovoitov static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 15879940d67cSAlexei Starovoitov .func = bpf_get_stackid_tp, 15889940d67cSAlexei Starovoitov .gpl_only = true, 15899940d67cSAlexei Starovoitov .ret_type = RET_INTEGER, 15909940d67cSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 15919940d67cSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 15929940d67cSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 15939940d67cSAlexei Starovoitov }; 15949940d67cSAlexei Starovoitov 1595c195651eSYonghong Song BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1596c195651eSYonghong Song u64, flags) 1597c195651eSYonghong Song { 1598c195651eSYonghong Song struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1599c195651eSYonghong Song 1600c195651eSYonghong Song return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1601c195651eSYonghong Song (unsigned long) size, flags, 0); 1602c195651eSYonghong Song } 1603c195651eSYonghong Song 1604c195651eSYonghong Song static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1605c195651eSYonghong Song .func = bpf_get_stack_tp, 1606c195651eSYonghong Song .gpl_only = true, 1607c195651eSYonghong Song .ret_type = RET_INTEGER, 1608c195651eSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1609c195651eSYonghong Song .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1610c195651eSYonghong Song .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1611c195651eSYonghong Song .arg4_type = ARG_ANYTHING, 1612c195651eSYonghong Song }; 1613c195651eSYonghong Song 16145e43f899SAndrey Ignatov static const struct bpf_func_proto * 16155e43f899SAndrey Ignatov tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 16169fd82b61SAlexei Starovoitov { 16179fd82b61SAlexei Starovoitov switch (func_id) { 16189fd82b61SAlexei Starovoitov case BPF_FUNC_perf_event_output: 16199940d67cSAlexei 
Starovoitov return &bpf_perf_event_output_proto_tp; 16209fd82b61SAlexei Starovoitov case BPF_FUNC_get_stackid: 16219940d67cSAlexei Starovoitov return &bpf_get_stackid_proto_tp; 1622c195651eSYonghong Song case BPF_FUNC_get_stack: 1623c195651eSYonghong Song return &bpf_get_stack_proto_tp; 16247adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 16257adfc6c9SAndrii Nakryiko return &bpf_get_attach_cookie_proto_trace; 16269fd82b61SAlexei Starovoitov default: 1627fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 16289fd82b61SAlexei Starovoitov } 16299fd82b61SAlexei Starovoitov } 16309fd82b61SAlexei Starovoitov 163119de99f7SAlexei Starovoitov static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 16325e43f899SAndrey Ignatov const struct bpf_prog *prog, 163323994631SYonghong Song struct bpf_insn_access_aux *info) 16349fd82b61SAlexei Starovoitov { 16359fd82b61SAlexei Starovoitov if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 16369fd82b61SAlexei Starovoitov return false; 16379fd82b61SAlexei Starovoitov if (type != BPF_READ) 16389fd82b61SAlexei Starovoitov return false; 16399fd82b61SAlexei Starovoitov if (off % size != 0) 16409fd82b61SAlexei Starovoitov return false; 16412d071c64SDaniel Borkmann 16422d071c64SDaniel Borkmann BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 16439fd82b61SAlexei Starovoitov return true; 16449fd82b61SAlexei Starovoitov } 16459fd82b61SAlexei Starovoitov 16467de16e3aSJakub Kicinski const struct bpf_verifier_ops tracepoint_verifier_ops = { 16479fd82b61SAlexei Starovoitov .get_func_proto = tp_prog_func_proto, 16489fd82b61SAlexei Starovoitov .is_valid_access = tp_prog_is_valid_access, 16499fd82b61SAlexei Starovoitov }; 16509fd82b61SAlexei Starovoitov 16517de16e3aSJakub Kicinski const struct bpf_prog_ops tracepoint_prog_ops = { 16527de16e3aSJakub Kicinski }; 16537de16e3aSJakub Kicinski 1654f005afedSYonghong Song BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1655f005afedSYonghong Song struct bpf_perf_event_value *, buf, u32, size) 1656f005afedSYonghong Song { 1657f005afedSYonghong Song int err = -EINVAL; 1658f005afedSYonghong Song 1659f005afedSYonghong Song if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1660f005afedSYonghong Song goto clear; 1661f005afedSYonghong Song err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1662f005afedSYonghong Song &buf->running); 1663f005afedSYonghong Song if (unlikely(err)) 1664f005afedSYonghong Song goto clear; 1665f005afedSYonghong Song return 0; 1666f005afedSYonghong Song clear: 1667f005afedSYonghong Song memset(buf, 0, size); 1668f005afedSYonghong Song return err; 1669f005afedSYonghong Song } 1670f005afedSYonghong Song 1671f005afedSYonghong Song static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1672f005afedSYonghong Song .func = bpf_perf_prog_read_value, 1673f005afedSYonghong Song .gpl_only = true, 1674f005afedSYonghong Song .ret_type = RET_INTEGER, 1675f005afedSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1676f005afedSYonghong Song .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1677f005afedSYonghong Song .arg3_type = ARG_CONST_SIZE, 1678f005afedSYonghong Song }; 1679f005afedSYonghong Song 1680fff7b643SDaniel Xu BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1681fff7b643SDaniel Xu void *, buf, u32, size, u64, flags) 1682fff7b643SDaniel Xu { 1683fff7b643SDaniel Xu static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1684fff7b643SDaniel Xu struct perf_branch_stack *br_stack = 
ctx->data->br_stack; 1685fff7b643SDaniel Xu u32 to_copy; 1686fff7b643SDaniel Xu 1687fff7b643SDaniel Xu if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1688fff7b643SDaniel Xu return -EINVAL; 1689fff7b643SDaniel Xu 1690fff7b643SDaniel Xu if (unlikely(!br_stack)) 1691db52f572SKajol Jain return -ENOENT; 1692fff7b643SDaniel Xu 1693fff7b643SDaniel Xu if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1694fff7b643SDaniel Xu return br_stack->nr * br_entry_size; 1695fff7b643SDaniel Xu 1696fff7b643SDaniel Xu if (!buf || (size % br_entry_size != 0)) 1697fff7b643SDaniel Xu return -EINVAL; 1698fff7b643SDaniel Xu 1699fff7b643SDaniel Xu to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1700fff7b643SDaniel Xu memcpy(buf, br_stack->entries, to_copy); 1701fff7b643SDaniel Xu 1702fff7b643SDaniel Xu return to_copy; 1703fff7b643SDaniel Xu } 1704fff7b643SDaniel Xu 1705fff7b643SDaniel Xu static const struct bpf_func_proto bpf_read_branch_records_proto = { 1706fff7b643SDaniel Xu .func = bpf_read_branch_records, 1707fff7b643SDaniel Xu .gpl_only = true, 1708fff7b643SDaniel Xu .ret_type = RET_INTEGER, 1709fff7b643SDaniel Xu .arg1_type = ARG_PTR_TO_CTX, 1710fff7b643SDaniel Xu .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1711fff7b643SDaniel Xu .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1712fff7b643SDaniel Xu .arg4_type = ARG_ANYTHING, 1713fff7b643SDaniel Xu }; 1714fff7b643SDaniel Xu 17155e43f899SAndrey Ignatov static const struct bpf_func_proto * 17165e43f899SAndrey Ignatov pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1717f005afedSYonghong Song { 1718f005afedSYonghong Song switch (func_id) { 1719f005afedSYonghong Song case BPF_FUNC_perf_event_output: 1720f005afedSYonghong Song return &bpf_perf_event_output_proto_tp; 1721f005afedSYonghong Song case BPF_FUNC_get_stackid: 17227b04d6d6SSong Liu return &bpf_get_stackid_proto_pe; 1723c195651eSYonghong Song case BPF_FUNC_get_stack: 17247b04d6d6SSong Liu return &bpf_get_stack_proto_pe; 1725f005afedSYonghong Song case BPF_FUNC_perf_prog_read_value: 1726f005afedSYonghong Song return &bpf_perf_prog_read_value_proto; 1727fff7b643SDaniel Xu case BPF_FUNC_read_branch_records: 1728fff7b643SDaniel Xu return &bpf_read_branch_records_proto; 17297adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 17307adfc6c9SAndrii Nakryiko return &bpf_get_attach_cookie_proto_pe; 1731f005afedSYonghong Song default: 1732fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 1733f005afedSYonghong Song } 1734f005afedSYonghong Song } 1735f005afedSYonghong Song 1736c4f6699dSAlexei Starovoitov /* 1737c4f6699dSAlexei Starovoitov * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1738c4f6699dSAlexei Starovoitov * to avoid potential recursive reuse issue when/if tracepoints are added 17399594dc3cSMatt Mullins * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 17409594dc3cSMatt Mullins * 17419594dc3cSMatt Mullins * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 17429594dc3cSMatt Mullins * in normal, irq, and nmi context. 
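 *
 * The three pt_regs slots below line up with those nesting levels:
 * get_bpf_raw_tp_regs() hands out one slot per level and fails with
 * -EBUSY once the per-CPU nest level exceeds ARRAY_SIZE(regs).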
1743c4f6699dSAlexei Starovoitov */ 17449594dc3cSMatt Mullins struct bpf_raw_tp_regs { 17459594dc3cSMatt Mullins struct pt_regs regs[3]; 17469594dc3cSMatt Mullins }; 17479594dc3cSMatt Mullins static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 17489594dc3cSMatt Mullins static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 17499594dc3cSMatt Mullins static struct pt_regs *get_bpf_raw_tp_regs(void) 17509594dc3cSMatt Mullins { 17519594dc3cSMatt Mullins struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 17529594dc3cSMatt Mullins int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 17539594dc3cSMatt Mullins 17549594dc3cSMatt Mullins if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 17559594dc3cSMatt Mullins this_cpu_dec(bpf_raw_tp_nest_level); 17569594dc3cSMatt Mullins return ERR_PTR(-EBUSY); 17579594dc3cSMatt Mullins } 17589594dc3cSMatt Mullins 17599594dc3cSMatt Mullins return &tp_regs->regs[nest_level - 1]; 17609594dc3cSMatt Mullins } 17619594dc3cSMatt Mullins 17629594dc3cSMatt Mullins static void put_bpf_raw_tp_regs(void) 17639594dc3cSMatt Mullins { 17649594dc3cSMatt Mullins this_cpu_dec(bpf_raw_tp_nest_level); 17659594dc3cSMatt Mullins } 17669594dc3cSMatt Mullins 1767c4f6699dSAlexei Starovoitov BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1768c4f6699dSAlexei Starovoitov struct bpf_map *, map, u64, flags, void *, data, u64, size) 1769c4f6699dSAlexei Starovoitov { 17709594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 17719594dc3cSMatt Mullins int ret; 17729594dc3cSMatt Mullins 17739594dc3cSMatt Mullins if (IS_ERR(regs)) 17749594dc3cSMatt Mullins return PTR_ERR(regs); 1775c4f6699dSAlexei Starovoitov 1776c4f6699dSAlexei Starovoitov perf_fetch_caller_regs(regs); 17779594dc3cSMatt Mullins ret = ____bpf_perf_event_output(regs, map, flags, data, size); 17789594dc3cSMatt Mullins 17799594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 17809594dc3cSMatt Mullins return ret; 1781c4f6699dSAlexei Starovoitov } 1782c4f6699dSAlexei Starovoitov 1783c4f6699dSAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1784c4f6699dSAlexei Starovoitov .func = bpf_perf_event_output_raw_tp, 1785c4f6699dSAlexei Starovoitov .gpl_only = true, 1786c4f6699dSAlexei Starovoitov .ret_type = RET_INTEGER, 1787c4f6699dSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 1788c4f6699dSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 1789c4f6699dSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1790216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1791c4f6699dSAlexei Starovoitov .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1792c4f6699dSAlexei Starovoitov }; 1793c4f6699dSAlexei Starovoitov 1794a7658e1aSAlexei Starovoitov extern const struct bpf_func_proto bpf_skb_output_proto; 1795d831ee84SEelco Chaudron extern const struct bpf_func_proto bpf_xdp_output_proto; 1796d9917302SEelco Chaudron extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; 1797a7658e1aSAlexei Starovoitov 1798c4f6699dSAlexei Starovoitov BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1799c4f6699dSAlexei Starovoitov struct bpf_map *, map, u64, flags) 1800c4f6699dSAlexei Starovoitov { 18019594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 18029594dc3cSMatt Mullins int ret; 18039594dc3cSMatt Mullins 18049594dc3cSMatt Mullins if (IS_ERR(regs)) 18059594dc3cSMatt Mullins return PTR_ERR(regs); 1806c4f6699dSAlexei Starovoitov 1807c4f6699dSAlexei Starovoitov perf_fetch_caller_regs(regs); 
1808c4f6699dSAlexei Starovoitov /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 18099594dc3cSMatt Mullins ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1810c4f6699dSAlexei Starovoitov flags, 0, 0); 18119594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 18129594dc3cSMatt Mullins return ret; 1813c4f6699dSAlexei Starovoitov } 1814c4f6699dSAlexei Starovoitov 1815c4f6699dSAlexei Starovoitov static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1816c4f6699dSAlexei Starovoitov .func = bpf_get_stackid_raw_tp, 1817c4f6699dSAlexei Starovoitov .gpl_only = true, 1818c4f6699dSAlexei Starovoitov .ret_type = RET_INTEGER, 1819c4f6699dSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 1820c4f6699dSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 1821c4f6699dSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1822c4f6699dSAlexei Starovoitov }; 1823c4f6699dSAlexei Starovoitov 1824c195651eSYonghong Song BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1825c195651eSYonghong Song void *, buf, u32, size, u64, flags) 1826c195651eSYonghong Song { 18279594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 18289594dc3cSMatt Mullins int ret; 18299594dc3cSMatt Mullins 18309594dc3cSMatt Mullins if (IS_ERR(regs)) 18319594dc3cSMatt Mullins return PTR_ERR(regs); 1832c195651eSYonghong Song 1833c195651eSYonghong Song perf_fetch_caller_regs(regs); 18349594dc3cSMatt Mullins ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1835c195651eSYonghong Song (unsigned long) size, flags, 0); 18369594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 18379594dc3cSMatt Mullins return ret; 1838c195651eSYonghong Song } 1839c195651eSYonghong Song 1840c195651eSYonghong Song static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1841c195651eSYonghong Song .func = bpf_get_stack_raw_tp, 1842c195651eSYonghong Song .gpl_only = true, 1843c195651eSYonghong Song .ret_type = RET_INTEGER, 1844c195651eSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1845216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1846c195651eSYonghong Song .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1847c195651eSYonghong Song .arg4_type = ARG_ANYTHING, 1848c195651eSYonghong Song }; 1849c195651eSYonghong Song 18505e43f899SAndrey Ignatov static const struct bpf_func_proto * 18515e43f899SAndrey Ignatov raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1852c4f6699dSAlexei Starovoitov { 1853c4f6699dSAlexei Starovoitov switch (func_id) { 1854c4f6699dSAlexei Starovoitov case BPF_FUNC_perf_event_output: 1855c4f6699dSAlexei Starovoitov return &bpf_perf_event_output_proto_raw_tp; 1856c4f6699dSAlexei Starovoitov case BPF_FUNC_get_stackid: 1857c4f6699dSAlexei Starovoitov return &bpf_get_stackid_proto_raw_tp; 1858c195651eSYonghong Song case BPF_FUNC_get_stack: 1859c195651eSYonghong Song return &bpf_get_stack_proto_raw_tp; 1860c4f6699dSAlexei Starovoitov default: 1861fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 1862c4f6699dSAlexei Starovoitov } 1863c4f6699dSAlexei Starovoitov } 1864c4f6699dSAlexei Starovoitov 1865958a3f2dSJiri Olsa const struct bpf_func_proto * 1866f1b9509cSAlexei Starovoitov tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1867f1b9509cSAlexei Starovoitov { 18683cee6fb8SMartin KaFai Lau const struct bpf_func_proto *fn; 18693cee6fb8SMartin KaFai Lau 1870f1b9509cSAlexei Starovoitov switch (func_id) { 1871f1b9509cSAlexei Starovoitov #ifdef CONFIG_NET 1872f1b9509cSAlexei Starovoitov case BPF_FUNC_skb_output: 
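	/* Like bpf_perf_event_output(), but the context passed to the
	 * helper is an in-kernel struct sk_buff pointer.
	 */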
1873f1b9509cSAlexei Starovoitov return &bpf_skb_output_proto; 1874d831ee84SEelco Chaudron case BPF_FUNC_xdp_output: 1875d831ee84SEelco Chaudron return &bpf_xdp_output_proto; 1876af7ec138SYonghong Song case BPF_FUNC_skc_to_tcp6_sock: 1877af7ec138SYonghong Song return &bpf_skc_to_tcp6_sock_proto; 1878478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_sock: 1879478cfbdfSYonghong Song return &bpf_skc_to_tcp_sock_proto; 1880478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_timewait_sock: 1881478cfbdfSYonghong Song return &bpf_skc_to_tcp_timewait_sock_proto; 1882478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_request_sock: 1883478cfbdfSYonghong Song return &bpf_skc_to_tcp_request_sock_proto; 18840d4fad3eSYonghong Song case BPF_FUNC_skc_to_udp6_sock: 18850d4fad3eSYonghong Song return &bpf_skc_to_udp6_sock_proto; 18869eeb3aa3SHengqi Chen case BPF_FUNC_skc_to_unix_sock: 18879eeb3aa3SHengqi Chen return &bpf_skc_to_unix_sock_proto; 18883bc253c2SGeliang Tang case BPF_FUNC_skc_to_mptcp_sock: 18893bc253c2SGeliang Tang return &bpf_skc_to_mptcp_sock_proto; 18908e4597c6SMartin KaFai Lau case BPF_FUNC_sk_storage_get: 18918e4597c6SMartin KaFai Lau return &bpf_sk_storage_get_tracing_proto; 18928e4597c6SMartin KaFai Lau case BPF_FUNC_sk_storage_delete: 18938e4597c6SMartin KaFai Lau return &bpf_sk_storage_delete_tracing_proto; 1894b60da495SFlorent Revest case BPF_FUNC_sock_from_file: 1895b60da495SFlorent Revest return &bpf_sock_from_file_proto; 1896c5dbb89fSFlorent Revest case BPF_FUNC_get_socket_cookie: 1897c5dbb89fSFlorent Revest return &bpf_get_socket_ptr_cookie_proto; 1898d9917302SEelco Chaudron case BPF_FUNC_xdp_get_buff_len: 1899d9917302SEelco Chaudron return &bpf_xdp_get_buff_len_trace_proto; 1900f1b9509cSAlexei Starovoitov #endif 1901492e639fSYonghong Song case BPF_FUNC_seq_printf: 1902492e639fSYonghong Song return prog->expected_attach_type == BPF_TRACE_ITER ? 1903492e639fSYonghong Song &bpf_seq_printf_proto : 1904492e639fSYonghong Song NULL; 1905492e639fSYonghong Song case BPF_FUNC_seq_write: 1906492e639fSYonghong Song return prog->expected_attach_type == BPF_TRACE_ITER ? 1907492e639fSYonghong Song &bpf_seq_write_proto : 1908492e639fSYonghong Song NULL; 1909eb411377SAlan Maguire case BPF_FUNC_seq_printf_btf: 1910eb411377SAlan Maguire return prog->expected_attach_type == BPF_TRACE_ITER ? 1911eb411377SAlan Maguire &bpf_seq_printf_btf_proto : 1912eb411377SAlan Maguire NULL; 19136e22ab9dSJiri Olsa case BPF_FUNC_d_path: 19146e22ab9dSJiri Olsa return &bpf_d_path_proto; 1915f92c1e18SJiri Olsa case BPF_FUNC_get_func_arg: 1916f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; 1917f92c1e18SJiri Olsa case BPF_FUNC_get_func_ret: 1918f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; 1919f92c1e18SJiri Olsa case BPF_FUNC_get_func_arg_cnt: 1920f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; 19212fcc8241SKui-Feng Lee case BPF_FUNC_get_attach_cookie: 19222fcc8241SKui-Feng Lee return bpf_prog_has_trampoline(prog) ? 
&bpf_get_attach_cookie_proto_tracing : NULL; 1923f1b9509cSAlexei Starovoitov default: 19243cee6fb8SMartin KaFai Lau fn = raw_tp_prog_func_proto(func_id, prog); 19253cee6fb8SMartin KaFai Lau if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) 19263cee6fb8SMartin KaFai Lau fn = bpf_iter_get_func_proto(func_id, prog); 19273cee6fb8SMartin KaFai Lau return fn; 1928f1b9509cSAlexei Starovoitov } 1929f1b9509cSAlexei Starovoitov } 1930f1b9509cSAlexei Starovoitov 1931c4f6699dSAlexei Starovoitov static bool raw_tp_prog_is_valid_access(int off, int size, 1932c4f6699dSAlexei Starovoitov enum bpf_access_type type, 19335e43f899SAndrey Ignatov const struct bpf_prog *prog, 1934c4f6699dSAlexei Starovoitov struct bpf_insn_access_aux *info) 1935c4f6699dSAlexei Starovoitov { 193635346ab6SHou Tao return bpf_tracing_ctx_access(off, size, type); 1937f1b9509cSAlexei Starovoitov } 1938f1b9509cSAlexei Starovoitov 1939f1b9509cSAlexei Starovoitov static bool tracing_prog_is_valid_access(int off, int size, 1940f1b9509cSAlexei Starovoitov enum bpf_access_type type, 1941f1b9509cSAlexei Starovoitov const struct bpf_prog *prog, 1942f1b9509cSAlexei Starovoitov struct bpf_insn_access_aux *info) 1943f1b9509cSAlexei Starovoitov { 194435346ab6SHou Tao return bpf_tracing_btf_ctx_access(off, size, type, prog, info); 1945c4f6699dSAlexei Starovoitov } 1946c4f6699dSAlexei Starovoitov 19473e7c67d9SKP Singh int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 19483e7c67d9SKP Singh const union bpf_attr *kattr, 19493e7c67d9SKP Singh union bpf_attr __user *uattr) 19503e7c67d9SKP Singh { 19513e7c67d9SKP Singh return -ENOTSUPP; 19523e7c67d9SKP Singh } 19533e7c67d9SKP Singh 1954c4f6699dSAlexei Starovoitov const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1955c4f6699dSAlexei Starovoitov .get_func_proto = raw_tp_prog_func_proto, 1956c4f6699dSAlexei Starovoitov .is_valid_access = raw_tp_prog_is_valid_access, 1957c4f6699dSAlexei Starovoitov }; 1958c4f6699dSAlexei Starovoitov 1959c4f6699dSAlexei Starovoitov const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1960ebfb4d40SYonghong Song #ifdef CONFIG_NET 19611b4d60ecSSong Liu .test_run = bpf_prog_test_run_raw_tp, 1962ebfb4d40SYonghong Song #endif 1963c4f6699dSAlexei Starovoitov }; 1964c4f6699dSAlexei Starovoitov 1965f1b9509cSAlexei Starovoitov const struct bpf_verifier_ops tracing_verifier_ops = { 1966f1b9509cSAlexei Starovoitov .get_func_proto = tracing_prog_func_proto, 1967f1b9509cSAlexei Starovoitov .is_valid_access = tracing_prog_is_valid_access, 1968f1b9509cSAlexei Starovoitov }; 1969f1b9509cSAlexei Starovoitov 1970f1b9509cSAlexei Starovoitov const struct bpf_prog_ops tracing_prog_ops = { 1971da00d2f1SKP Singh .test_run = bpf_prog_test_run_tracing, 1972f1b9509cSAlexei Starovoitov }; 1973f1b9509cSAlexei Starovoitov 19749df1c28bSMatt Mullins static bool raw_tp_writable_prog_is_valid_access(int off, int size, 19759df1c28bSMatt Mullins enum bpf_access_type type, 19769df1c28bSMatt Mullins const struct bpf_prog *prog, 19779df1c28bSMatt Mullins struct bpf_insn_access_aux *info) 19789df1c28bSMatt Mullins { 19799df1c28bSMatt Mullins if (off == 0) { 19809df1c28bSMatt Mullins if (size != sizeof(u64) || type != BPF_READ) 19819df1c28bSMatt Mullins return false; 19829df1c28bSMatt Mullins info->reg_type = PTR_TO_TP_BUFFER; 19839df1c28bSMatt Mullins } 19849df1c28bSMatt Mullins return raw_tp_prog_is_valid_access(off, size, type, prog, info); 19859df1c28bSMatt Mullins } 19869df1c28bSMatt Mullins 19879df1c28bSMatt Mullins const struct bpf_verifier_ops 
raw_tracepoint_writable_verifier_ops = { 19889df1c28bSMatt Mullins .get_func_proto = raw_tp_prog_func_proto, 19899df1c28bSMatt Mullins .is_valid_access = raw_tp_writable_prog_is_valid_access, 19909df1c28bSMatt Mullins }; 19919df1c28bSMatt Mullins 19929df1c28bSMatt Mullins const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 19939df1c28bSMatt Mullins }; 19949df1c28bSMatt Mullins 19950515e599SAlexei Starovoitov static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 19965e43f899SAndrey Ignatov const struct bpf_prog *prog, 199723994631SYonghong Song struct bpf_insn_access_aux *info) 19980515e599SAlexei Starovoitov { 199995da0cdbSTeng Qin const int size_u64 = sizeof(u64); 200031fd8581SYonghong Song 20010515e599SAlexei Starovoitov if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 20020515e599SAlexei Starovoitov return false; 20030515e599SAlexei Starovoitov if (type != BPF_READ) 20040515e599SAlexei Starovoitov return false; 2005bc23105cSDaniel Borkmann if (off % size != 0) { 2006bc23105cSDaniel Borkmann if (sizeof(unsigned long) != 4) 20070515e599SAlexei Starovoitov return false; 2008bc23105cSDaniel Borkmann if (size != 8) 2009bc23105cSDaniel Borkmann return false; 2010bc23105cSDaniel Borkmann if (off % size != 4) 2011bc23105cSDaniel Borkmann return false; 2012bc23105cSDaniel Borkmann } 201331fd8581SYonghong Song 2014f96da094SDaniel Borkmann switch (off) { 2015f96da094SDaniel Borkmann case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 201695da0cdbSTeng Qin bpf_ctx_record_field_size(info, size_u64); 201795da0cdbSTeng Qin if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 201895da0cdbSTeng Qin return false; 201995da0cdbSTeng Qin break; 202095da0cdbSTeng Qin case bpf_ctx_range(struct bpf_perf_event_data, addr): 202195da0cdbSTeng Qin bpf_ctx_record_field_size(info, size_u64); 202295da0cdbSTeng Qin if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 202323994631SYonghong Song return false; 2024f96da094SDaniel Borkmann break; 2025f96da094SDaniel Borkmann default: 20260515e599SAlexei Starovoitov if (size != sizeof(long)) 20270515e599SAlexei Starovoitov return false; 20280515e599SAlexei Starovoitov } 2029f96da094SDaniel Borkmann 20300515e599SAlexei Starovoitov return true; 20310515e599SAlexei Starovoitov } 20320515e599SAlexei Starovoitov 20336b8cc1d1SDaniel Borkmann static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 20346b8cc1d1SDaniel Borkmann const struct bpf_insn *si, 20350515e599SAlexei Starovoitov struct bpf_insn *insn_buf, 2036f96da094SDaniel Borkmann struct bpf_prog *prog, u32 *target_size) 20370515e599SAlexei Starovoitov { 20380515e599SAlexei Starovoitov struct bpf_insn *insn = insn_buf; 20390515e599SAlexei Starovoitov 20406b8cc1d1SDaniel Borkmann switch (si->off) { 20410515e599SAlexei Starovoitov case offsetof(struct bpf_perf_event_data, sample_period): 2042f035a515SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 20436b8cc1d1SDaniel Borkmann data), si->dst_reg, si->src_reg, 20440515e599SAlexei Starovoitov offsetof(struct bpf_perf_event_data_kern, data)); 20456b8cc1d1SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2046f96da094SDaniel Borkmann bpf_target_off(struct perf_sample_data, period, 8, 2047f96da094SDaniel Borkmann target_size)); 20480515e599SAlexei Starovoitov break; 204995da0cdbSTeng Qin case offsetof(struct bpf_perf_event_data, addr): 205095da0cdbSTeng Qin *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 205195da0cdbSTeng Qin 
data), si->dst_reg, si->src_reg, 205295da0cdbSTeng Qin offsetof(struct bpf_perf_event_data_kern, data)); 205395da0cdbSTeng Qin *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 205495da0cdbSTeng Qin bpf_target_off(struct perf_sample_data, addr, 8, 205595da0cdbSTeng Qin target_size)); 205695da0cdbSTeng Qin break; 20570515e599SAlexei Starovoitov default: 2058f035a515SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 20596b8cc1d1SDaniel Borkmann regs), si->dst_reg, si->src_reg, 20600515e599SAlexei Starovoitov offsetof(struct bpf_perf_event_data_kern, regs)); 20616b8cc1d1SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 20626b8cc1d1SDaniel Borkmann si->off); 20630515e599SAlexei Starovoitov break; 20640515e599SAlexei Starovoitov } 20650515e599SAlexei Starovoitov 20660515e599SAlexei Starovoitov return insn - insn_buf; 20670515e599SAlexei Starovoitov } 20680515e599SAlexei Starovoitov 20697de16e3aSJakub Kicinski const struct bpf_verifier_ops perf_event_verifier_ops = { 2070f005afedSYonghong Song .get_func_proto = pe_prog_func_proto, 20710515e599SAlexei Starovoitov .is_valid_access = pe_prog_is_valid_access, 20720515e599SAlexei Starovoitov .convert_ctx_access = pe_prog_convert_ctx_access, 20730515e599SAlexei Starovoitov }; 20747de16e3aSJakub Kicinski 20757de16e3aSJakub Kicinski const struct bpf_prog_ops perf_event_prog_ops = { 20767de16e3aSJakub Kicinski }; 2077e87c6bc3SYonghong Song 2078e87c6bc3SYonghong Song static DEFINE_MUTEX(bpf_event_mutex); 2079e87c6bc3SYonghong Song 2080c8c088baSYonghong Song #define BPF_TRACE_MAX_PROGS 64 2081c8c088baSYonghong Song 2082e87c6bc3SYonghong Song int perf_event_attach_bpf_prog(struct perf_event *event, 208382e6b1eeSAndrii Nakryiko struct bpf_prog *prog, 208482e6b1eeSAndrii Nakryiko u64 bpf_cookie) 2085e87c6bc3SYonghong Song { 2086e672db03SStanislav Fomichev struct bpf_prog_array *old_array; 2087e87c6bc3SYonghong Song struct bpf_prog_array *new_array; 2088e87c6bc3SYonghong Song int ret = -EEXIST; 2089e87c6bc3SYonghong Song 20909802d865SJosef Bacik /* 2091b4da3340SMasami Hiramatsu * Kprobe override only works if they are on the function entry, 2092b4da3340SMasami Hiramatsu * and only if they are on the opt-in list. 
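 *
 * ("On the opt-in list" means the target function is tagged with
 * ALLOW_ERROR_INJECTION(), which is what
 * trace_kprobe_error_injectable() checks.)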
20939802d865SJosef Bacik */ 20949802d865SJosef Bacik if (prog->kprobe_override && 2095b4da3340SMasami Hiramatsu (!trace_kprobe_on_func_entry(event->tp_event) || 20969802d865SJosef Bacik !trace_kprobe_error_injectable(event->tp_event))) 20979802d865SJosef Bacik return -EINVAL; 20989802d865SJosef Bacik 2099e87c6bc3SYonghong Song mutex_lock(&bpf_event_mutex); 2100e87c6bc3SYonghong Song 2101e87c6bc3SYonghong Song if (event->prog) 210207c41a29SYonghong Song goto unlock; 2103e87c6bc3SYonghong Song 2104e672db03SStanislav Fomichev old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2105c8c088baSYonghong Song if (old_array && 2106c8c088baSYonghong Song bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 2107c8c088baSYonghong Song ret = -E2BIG; 2108c8c088baSYonghong Song goto unlock; 2109c8c088baSYonghong Song } 2110c8c088baSYonghong Song 211182e6b1eeSAndrii Nakryiko ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); 2112e87c6bc3SYonghong Song if (ret < 0) 211307c41a29SYonghong Song goto unlock; 2114e87c6bc3SYonghong Song 2115e87c6bc3SYonghong Song /* set the new array to event->tp_event and set event->prog */ 2116e87c6bc3SYonghong Song event->prog = prog; 211782e6b1eeSAndrii Nakryiko event->bpf_cookie = bpf_cookie; 2118e87c6bc3SYonghong Song rcu_assign_pointer(event->tp_event->prog_array, new_array); 21198c7dcb84SDelyan Kratunov bpf_prog_array_free_sleepable(old_array); 2120e87c6bc3SYonghong Song 212107c41a29SYonghong Song unlock: 2122e87c6bc3SYonghong Song mutex_unlock(&bpf_event_mutex); 2123e87c6bc3SYonghong Song return ret; 2124e87c6bc3SYonghong Song } 2125e87c6bc3SYonghong Song 2126e87c6bc3SYonghong Song void perf_event_detach_bpf_prog(struct perf_event *event) 2127e87c6bc3SYonghong Song { 2128e672db03SStanislav Fomichev struct bpf_prog_array *old_array; 2129e87c6bc3SYonghong Song struct bpf_prog_array *new_array; 2130e87c6bc3SYonghong Song int ret; 2131e87c6bc3SYonghong Song 2132e87c6bc3SYonghong Song mutex_lock(&bpf_event_mutex); 2133e87c6bc3SYonghong Song 2134e87c6bc3SYonghong Song if (!event->prog) 213507c41a29SYonghong Song goto unlock; 2136e87c6bc3SYonghong Song 2137e672db03SStanislav Fomichev old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 213882e6b1eeSAndrii Nakryiko ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); 2139170a7e3eSSean Young if (ret == -ENOENT) 2140170a7e3eSSean Young goto unlock; 2141e87c6bc3SYonghong Song if (ret < 0) { 2142e87c6bc3SYonghong Song bpf_prog_array_delete_safe(old_array, event->prog); 2143e87c6bc3SYonghong Song } else { 2144e87c6bc3SYonghong Song rcu_assign_pointer(event->tp_event->prog_array, new_array); 21458c7dcb84SDelyan Kratunov bpf_prog_array_free_sleepable(old_array); 2146e87c6bc3SYonghong Song } 2147e87c6bc3SYonghong Song 2148e87c6bc3SYonghong Song bpf_prog_put(event->prog); 2149e87c6bc3SYonghong Song event->prog = NULL; 2150e87c6bc3SYonghong Song 215107c41a29SYonghong Song unlock: 2152e87c6bc3SYonghong Song mutex_unlock(&bpf_event_mutex); 2153e87c6bc3SYonghong Song } 2154f371b304SYonghong Song 2155f4e2298eSYonghong Song int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2156f371b304SYonghong Song { 2157f371b304SYonghong Song struct perf_event_query_bpf __user *uquery = info; 2158f371b304SYonghong Song struct perf_event_query_bpf query = {}; 2159e672db03SStanislav Fomichev struct bpf_prog_array *progs; 21603a38bb98SYonghong Song u32 *ids, prog_cnt, ids_len; 2161f371b304SYonghong Song int ret; 2162f371b304SYonghong Song 
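	/* perfmon_capable() admits CAP_PERFMON as well as the broader
	 * CAP_SYS_ADMIN.
	 */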
2163031258daSAlexey Budankov if (!perfmon_capable()) 2164f371b304SYonghong Song return -EPERM; 2165f371b304SYonghong Song if (event->attr.type != PERF_TYPE_TRACEPOINT) 2166f371b304SYonghong Song return -EINVAL; 2167f371b304SYonghong Song if (copy_from_user(&query, uquery, sizeof(query))) 2168f371b304SYonghong Song return -EFAULT; 21693a38bb98SYonghong Song 21703a38bb98SYonghong Song ids_len = query.ids_len; 21713a38bb98SYonghong Song if (ids_len > BPF_TRACE_MAX_PROGS) 21729c481b90SDaniel Borkmann return -E2BIG; 21733a38bb98SYonghong Song ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 21743a38bb98SYonghong Song if (!ids) 21753a38bb98SYonghong Song return -ENOMEM; 21763a38bb98SYonghong Song /* 21773a38bb98SYonghong Song * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 21783a38bb98SYonghong Song * is required when user only wants to check for uquery->prog_cnt. 21793a38bb98SYonghong Song * There is no need to check for it since the case is handled 21803a38bb98SYonghong Song * gracefully in bpf_prog_array_copy_info. 21813a38bb98SYonghong Song */ 2182f371b304SYonghong Song 2183f371b304SYonghong Song mutex_lock(&bpf_event_mutex); 2184e672db03SStanislav Fomichev progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2185e672db03SStanislav Fomichev ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2186f371b304SYonghong Song mutex_unlock(&bpf_event_mutex); 2187f371b304SYonghong Song 21883a38bb98SYonghong Song if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 21893a38bb98SYonghong Song copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 21903a38bb98SYonghong Song ret = -EFAULT; 21913a38bb98SYonghong Song 21923a38bb98SYonghong Song kfree(ids); 2193f371b304SYonghong Song return ret; 2194f371b304SYonghong Song } 2195c4f6699dSAlexei Starovoitov 2196c4f6699dSAlexei Starovoitov extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2197c4f6699dSAlexei Starovoitov extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2198c4f6699dSAlexei Starovoitov 2199a38d1107SMatt Mullins struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2200c4f6699dSAlexei Starovoitov { 2201c4f6699dSAlexei Starovoitov struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2202c4f6699dSAlexei Starovoitov 2203c4f6699dSAlexei Starovoitov for (; btp < __stop__bpf_raw_tp; btp++) { 2204c4f6699dSAlexei Starovoitov if (!strcmp(btp->tp->name, name)) 2205c4f6699dSAlexei Starovoitov return btp; 2206c4f6699dSAlexei Starovoitov } 2207a38d1107SMatt Mullins 2208a38d1107SMatt Mullins return bpf_get_raw_tracepoint_module(name); 2209a38d1107SMatt Mullins } 2210a38d1107SMatt Mullins 2211a38d1107SMatt Mullins void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2212a38d1107SMatt Mullins { 221312cc126dSAndrii Nakryiko struct module *mod; 2214a38d1107SMatt Mullins 221512cc126dSAndrii Nakryiko preempt_disable(); 221612cc126dSAndrii Nakryiko mod = __module_address((unsigned long)btp); 2217a38d1107SMatt Mullins module_put(mod); 221812cc126dSAndrii Nakryiko preempt_enable(); 2219c4f6699dSAlexei Starovoitov } 2220c4f6699dSAlexei Starovoitov 2221c4f6699dSAlexei Starovoitov static __always_inline 2222c4f6699dSAlexei Starovoitov void __bpf_trace_run(struct bpf_prog *prog, u64 *args) 2223c4f6699dSAlexei Starovoitov { 2224f03efe49SThomas Gleixner cant_sleep(); 2225c4f6699dSAlexei Starovoitov rcu_read_lock(); 2226fb7dd8bcSAndrii Nakryiko (void) bpf_prog_run(prog, args); 2227c4f6699dSAlexei Starovoitov rcu_read_unlock(); 2228c4f6699dSAlexei Starovoitov } 2229c4f6699dSAlexei 
Starovoitov 2230c4f6699dSAlexei Starovoitov #define UNPACK(...) __VA_ARGS__ 2231c4f6699dSAlexei Starovoitov #define REPEAT_1(FN, DL, X, ...) FN(X) 2232c4f6699dSAlexei Starovoitov #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2233c4f6699dSAlexei Starovoitov #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2234c4f6699dSAlexei Starovoitov #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2235c4f6699dSAlexei Starovoitov #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2236c4f6699dSAlexei Starovoitov #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2237c4f6699dSAlexei Starovoitov #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2238c4f6699dSAlexei Starovoitov #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2239c4f6699dSAlexei Starovoitov #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2240c4f6699dSAlexei Starovoitov #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2241c4f6699dSAlexei Starovoitov #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2242c4f6699dSAlexei Starovoitov #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2243c4f6699dSAlexei Starovoitov #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) 2244c4f6699dSAlexei Starovoitov 2245c4f6699dSAlexei Starovoitov #define SARG(X) u64 arg##X 2246c4f6699dSAlexei Starovoitov #define COPY(X) args[X] = arg##X 2247c4f6699dSAlexei Starovoitov 2248c4f6699dSAlexei Starovoitov #define __DL_COM (,) 2249c4f6699dSAlexei Starovoitov #define __DL_SEM (;) 2250c4f6699dSAlexei Starovoitov 2251c4f6699dSAlexei Starovoitov #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2252c4f6699dSAlexei Starovoitov 2253c4f6699dSAlexei Starovoitov #define BPF_TRACE_DEFN_x(x) \ 2254c4f6699dSAlexei Starovoitov void bpf_trace_run##x(struct bpf_prog *prog, \ 2255c4f6699dSAlexei Starovoitov REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2256c4f6699dSAlexei Starovoitov { \ 2257c4f6699dSAlexei Starovoitov u64 args[x]; \ 2258c4f6699dSAlexei Starovoitov REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2259c4f6699dSAlexei Starovoitov __bpf_trace_run(prog, args); \ 2260c4f6699dSAlexei Starovoitov } \ 2261c4f6699dSAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2262c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(1); 2263c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(2); 2264c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(3); 2265c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(4); 2266c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(5); 2267c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(6); 2268c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(7); 2269c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(8); 2270c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(9); 2271c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(10); 2272c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(11); 2273c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(12); 2274c4f6699dSAlexei Starovoitov 2275c4f6699dSAlexei Starovoitov static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2276c4f6699dSAlexei Starovoitov { 2277c4f6699dSAlexei Starovoitov struct tracepoint *tp = btp->tp; 2278c4f6699dSAlexei Starovoitov 2279c4f6699dSAlexei Starovoitov /* 2280c4f6699dSAlexei Starovoitov * check that program doesn't access arguments beyond what's 2281c4f6699dSAlexei Starovoitov * available in this 
tracepoint 2282c4f6699dSAlexei Starovoitov */ 2283c4f6699dSAlexei Starovoitov if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2284c4f6699dSAlexei Starovoitov return -EINVAL; 2285c4f6699dSAlexei Starovoitov 22869df1c28bSMatt Mullins if (prog->aux->max_tp_access > btp->writable_size) 22879df1c28bSMatt Mullins return -EINVAL; 22889df1c28bSMatt Mullins 22899913d574SSteven Rostedt (VMware) return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, 22909913d574SSteven Rostedt (VMware) prog); 2291c4f6699dSAlexei Starovoitov } 2292c4f6699dSAlexei Starovoitov 2293c4f6699dSAlexei Starovoitov int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2294c4f6699dSAlexei Starovoitov { 2295e16ec340SAlexei Starovoitov return __bpf_probe_register(btp, prog); 2296c4f6699dSAlexei Starovoitov } 2297c4f6699dSAlexei Starovoitov 2298c4f6699dSAlexei Starovoitov int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2299c4f6699dSAlexei Starovoitov { 2300e16ec340SAlexei Starovoitov return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 2301c4f6699dSAlexei Starovoitov } 230241bdc4b4SYonghong Song 230341bdc4b4SYonghong Song int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 230441bdc4b4SYonghong Song u32 *fd_type, const char **buf, 230541bdc4b4SYonghong Song u64 *probe_offset, u64 *probe_addr) 230641bdc4b4SYonghong Song { 230741bdc4b4SYonghong Song bool is_tracepoint, is_syscall_tp; 230841bdc4b4SYonghong Song struct bpf_prog *prog; 230941bdc4b4SYonghong Song int flags, err = 0; 231041bdc4b4SYonghong Song 231141bdc4b4SYonghong Song prog = event->prog; 231241bdc4b4SYonghong Song if (!prog) 231341bdc4b4SYonghong Song return -ENOENT; 231441bdc4b4SYonghong Song 231541bdc4b4SYonghong Song /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 231641bdc4b4SYonghong Song if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 231741bdc4b4SYonghong Song return -EOPNOTSUPP; 231841bdc4b4SYonghong Song 231941bdc4b4SYonghong Song *prog_id = prog->aux->id; 232041bdc4b4SYonghong Song flags = event->tp_event->flags; 232141bdc4b4SYonghong Song is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 232241bdc4b4SYonghong Song is_syscall_tp = is_syscall_trace_event(event->tp_event); 232341bdc4b4SYonghong Song 232441bdc4b4SYonghong Song if (is_tracepoint || is_syscall_tp) { 232541bdc4b4SYonghong Song *buf = is_tracepoint ? 
event->tp_event->tp->name 232641bdc4b4SYonghong Song : event->tp_event->name; 232741bdc4b4SYonghong Song *fd_type = BPF_FD_TYPE_TRACEPOINT; 232841bdc4b4SYonghong Song *probe_offset = 0x0; 232941bdc4b4SYonghong Song *probe_addr = 0x0; 233041bdc4b4SYonghong Song } else { 233141bdc4b4SYonghong Song /* kprobe/uprobe */ 233241bdc4b4SYonghong Song err = -EOPNOTSUPP; 233341bdc4b4SYonghong Song #ifdef CONFIG_KPROBE_EVENTS 233441bdc4b4SYonghong Song if (flags & TRACE_EVENT_FL_KPROBE) 233541bdc4b4SYonghong Song err = bpf_get_kprobe_info(event, fd_type, buf, 233641bdc4b4SYonghong Song probe_offset, probe_addr, 233741bdc4b4SYonghong Song event->attr.type == PERF_TYPE_TRACEPOINT); 233841bdc4b4SYonghong Song #endif 233941bdc4b4SYonghong Song #ifdef CONFIG_UPROBE_EVENTS 234041bdc4b4SYonghong Song if (flags & TRACE_EVENT_FL_UPROBE) 234141bdc4b4SYonghong Song err = bpf_get_uprobe_info(event, fd_type, buf, 234241bdc4b4SYonghong Song probe_offset, 234341bdc4b4SYonghong Song event->attr.type == PERF_TYPE_TRACEPOINT); 234441bdc4b4SYonghong Song #endif 234541bdc4b4SYonghong Song } 234641bdc4b4SYonghong Song 234741bdc4b4SYonghong Song return err; 234841bdc4b4SYonghong Song } 2349a38d1107SMatt Mullins 23509db1ff0aSYonghong Song static int __init send_signal_irq_work_init(void) 23519db1ff0aSYonghong Song { 23529db1ff0aSYonghong Song int cpu; 23539db1ff0aSYonghong Song struct send_signal_irq_work *work; 23549db1ff0aSYonghong Song 23559db1ff0aSYonghong Song for_each_possible_cpu(cpu) { 23569db1ff0aSYonghong Song work = per_cpu_ptr(&send_signal_work, cpu); 23579db1ff0aSYonghong Song init_irq_work(&work->irq_work, do_bpf_send_signal); 23589db1ff0aSYonghong Song } 23599db1ff0aSYonghong Song return 0; 23609db1ff0aSYonghong Song } 23619db1ff0aSYonghong Song 23629db1ff0aSYonghong Song subsys_initcall(send_signal_irq_work_init); 23639db1ff0aSYonghong Song 2364a38d1107SMatt Mullins #ifdef CONFIG_MODULES 2365390e99cfSStanislav Fomichev static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2366390e99cfSStanislav Fomichev void *module) 2367a38d1107SMatt Mullins { 2368a38d1107SMatt Mullins struct bpf_trace_module *btm, *tmp; 2369a38d1107SMatt Mullins struct module *mod = module; 23700340a6b7SPeter Zijlstra int ret = 0; 2371a38d1107SMatt Mullins 2372a38d1107SMatt Mullins if (mod->num_bpf_raw_events == 0 || 2373a38d1107SMatt Mullins (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 23740340a6b7SPeter Zijlstra goto out; 2375a38d1107SMatt Mullins 2376a38d1107SMatt Mullins mutex_lock(&bpf_module_mutex); 2377a38d1107SMatt Mullins 2378a38d1107SMatt Mullins switch (op) { 2379a38d1107SMatt Mullins case MODULE_STATE_COMING: 2380a38d1107SMatt Mullins btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2381a38d1107SMatt Mullins if (btm) { 2382a38d1107SMatt Mullins btm->module = module; 2383a38d1107SMatt Mullins list_add(&btm->list, &bpf_trace_modules); 23840340a6b7SPeter Zijlstra } else { 23850340a6b7SPeter Zijlstra ret = -ENOMEM; 2386a38d1107SMatt Mullins } 2387a38d1107SMatt Mullins break; 2388a38d1107SMatt Mullins case MODULE_STATE_GOING: 2389a38d1107SMatt Mullins list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2390a38d1107SMatt Mullins if (btm->module == module) { 2391a38d1107SMatt Mullins list_del(&btm->list); 2392a38d1107SMatt Mullins kfree(btm); 2393a38d1107SMatt Mullins break; 2394a38d1107SMatt Mullins } 2395a38d1107SMatt Mullins } 2396a38d1107SMatt Mullins break; 2397a38d1107SMatt Mullins } 2398a38d1107SMatt Mullins 2399a38d1107SMatt Mullins mutex_unlock(&bpf_module_mutex); 2400a38d1107SMatt Mullins 
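	/*
	 * Fall through to the common exit path; since this is a module
	 * notifier, the error (if any) is translated via notifier_from_errno()
	 * below, so a failed allocation on MODULE_STATE_COMING aborts the
	 * module load rather than leaving the module's raw tracepoints
	 * invisible to bpf_get_raw_tracepoint_module().
	 */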
24010340a6b7SPeter Zijlstra out: 24020340a6b7SPeter Zijlstra return notifier_from_errno(ret); 2403a38d1107SMatt Mullins } 2404a38d1107SMatt Mullins 2405a38d1107SMatt Mullins static struct notifier_block bpf_module_nb = { 2406a38d1107SMatt Mullins .notifier_call = bpf_event_notify, 2407a38d1107SMatt Mullins }; 2408a38d1107SMatt Mullins 2409390e99cfSStanislav Fomichev static int __init bpf_event_init(void) 2410a38d1107SMatt Mullins { 2411a38d1107SMatt Mullins register_module_notifier(&bpf_module_nb); 2412a38d1107SMatt Mullins return 0; 2413a38d1107SMatt Mullins } 2414a38d1107SMatt Mullins 2415a38d1107SMatt Mullins fs_initcall(bpf_event_init); 2416a38d1107SMatt Mullins #endif /* CONFIG_MODULES */ 24170dcac272SJiri Olsa 24180dcac272SJiri Olsa #ifdef CONFIG_FPROBE 24190dcac272SJiri Olsa struct bpf_kprobe_multi_link { 24200dcac272SJiri Olsa struct bpf_link link; 24210dcac272SJiri Olsa struct fprobe fp; 24220dcac272SJiri Olsa unsigned long *addrs; 2423ca74823cSJiri Olsa u64 *cookies; 2424ca74823cSJiri Olsa u32 cnt; 24250dcac272SJiri Olsa }; 24260dcac272SJiri Olsa 2427f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx { 2428f7098690SJiri Olsa struct bpf_run_ctx run_ctx; 2429f7098690SJiri Olsa struct bpf_kprobe_multi_link *link; 2430f7098690SJiri Olsa unsigned long entry_ip; 2431f7098690SJiri Olsa }; 2432f7098690SJiri Olsa 24330236fec5SJiri Olsa struct user_syms { 24340236fec5SJiri Olsa const char **syms; 24350236fec5SJiri Olsa char *buf; 24360236fec5SJiri Olsa }; 24370236fec5SJiri Olsa 24380236fec5SJiri Olsa static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) 24390236fec5SJiri Olsa { 24400236fec5SJiri Olsa unsigned long __user usymbol; 24410236fec5SJiri Olsa const char **syms = NULL; 24420236fec5SJiri Olsa char *buf = NULL, *p; 24430236fec5SJiri Olsa int err = -ENOMEM; 24440236fec5SJiri Olsa unsigned int i; 24450236fec5SJiri Olsa 2446fd58f7dfSDan Carpenter syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); 24470236fec5SJiri Olsa if (!syms) 24480236fec5SJiri Olsa goto error; 24490236fec5SJiri Olsa 2450fd58f7dfSDan Carpenter buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); 24510236fec5SJiri Olsa if (!buf) 24520236fec5SJiri Olsa goto error; 24530236fec5SJiri Olsa 24540236fec5SJiri Olsa for (p = buf, i = 0; i < cnt; i++) { 24550236fec5SJiri Olsa if (__get_user(usymbol, usyms + i)) { 24560236fec5SJiri Olsa err = -EFAULT; 24570236fec5SJiri Olsa goto error; 24580236fec5SJiri Olsa } 24590236fec5SJiri Olsa err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); 24600236fec5SJiri Olsa if (err == KSYM_NAME_LEN) 24610236fec5SJiri Olsa err = -E2BIG; 24620236fec5SJiri Olsa if (err < 0) 24630236fec5SJiri Olsa goto error; 24640236fec5SJiri Olsa syms[i] = p; 24650236fec5SJiri Olsa p += err + 1; 24660236fec5SJiri Olsa } 24670236fec5SJiri Olsa 24680236fec5SJiri Olsa us->syms = syms; 24690236fec5SJiri Olsa us->buf = buf; 24700236fec5SJiri Olsa return 0; 24710236fec5SJiri Olsa 24720236fec5SJiri Olsa error: 24730236fec5SJiri Olsa if (err) { 24740236fec5SJiri Olsa kvfree(syms); 24750236fec5SJiri Olsa kvfree(buf); 24760236fec5SJiri Olsa } 24770236fec5SJiri Olsa return err; 24780236fec5SJiri Olsa } 24790236fec5SJiri Olsa 24800236fec5SJiri Olsa static void free_user_syms(struct user_syms *us) 24810236fec5SJiri Olsa { 24820236fec5SJiri Olsa kvfree(us->syms); 24830236fec5SJiri Olsa kvfree(us->buf); 24840236fec5SJiri Olsa } 24850236fec5SJiri Olsa 24860dcac272SJiri Olsa static void bpf_kprobe_multi_link_release(struct bpf_link *link) 24870dcac272SJiri Olsa { 
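	/*
	 * Note: by the time this release hook runs, the link's refcount has
	 * dropped to zero; unregister_fprobe() synchronizes with any handler
	 * still executing, and only afterwards does ->dealloc() free the
	 * addrs/cookies arrays.
	 */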
24880dcac272SJiri Olsa struct bpf_kprobe_multi_link *kmulti_link; 24890dcac272SJiri Olsa 24900dcac272SJiri Olsa kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 24910dcac272SJiri Olsa unregister_fprobe(&kmulti_link->fp); 24920dcac272SJiri Olsa } 24930dcac272SJiri Olsa 24940dcac272SJiri Olsa static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) 24950dcac272SJiri Olsa { 24960dcac272SJiri Olsa struct bpf_kprobe_multi_link *kmulti_link; 24970dcac272SJiri Olsa 24980dcac272SJiri Olsa kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 24990dcac272SJiri Olsa kvfree(kmulti_link->addrs); 2500ca74823cSJiri Olsa kvfree(kmulti_link->cookies); 25010dcac272SJiri Olsa kfree(kmulti_link); 25020dcac272SJiri Olsa } 25030dcac272SJiri Olsa 25040dcac272SJiri Olsa static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { 25050dcac272SJiri Olsa .release = bpf_kprobe_multi_link_release, 25060dcac272SJiri Olsa .dealloc = bpf_kprobe_multi_link_dealloc, 25070dcac272SJiri Olsa }; 25080dcac272SJiri Olsa 2509ca74823cSJiri Olsa static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) 2510ca74823cSJiri Olsa { 2511ca74823cSJiri Olsa const struct bpf_kprobe_multi_link *link = priv; 2512ca74823cSJiri Olsa unsigned long *addr_a = a, *addr_b = b; 2513ca74823cSJiri Olsa u64 *cookie_a, *cookie_b; 2514ca74823cSJiri Olsa 2515ca74823cSJiri Olsa cookie_a = link->cookies + (addr_a - link->addrs); 2516ca74823cSJiri Olsa cookie_b = link->cookies + (addr_b - link->addrs); 2517ca74823cSJiri Olsa 2518ca74823cSJiri Olsa /* swap addr_a/addr_b and cookie_a/cookie_b values */ 251911e17ae4SJiapeng Chong swap(*addr_a, *addr_b); 252011e17ae4SJiapeng Chong swap(*cookie_a, *cookie_b); 2521ca74823cSJiri Olsa } 2522ca74823cSJiri Olsa 2523ca74823cSJiri Olsa static int __bpf_kprobe_multi_cookie_cmp(const void *a, const void *b) 2524ca74823cSJiri Olsa { 2525ca74823cSJiri Olsa const unsigned long *addr_a = a, *addr_b = b; 2526ca74823cSJiri Olsa 2527ca74823cSJiri Olsa if (*addr_a == *addr_b) 2528ca74823cSJiri Olsa return 0; 2529ca74823cSJiri Olsa return *addr_a < *addr_b ? 
-1 : 1; 2530ca74823cSJiri Olsa } 2531ca74823cSJiri Olsa 2532ca74823cSJiri Olsa static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) 2533ca74823cSJiri Olsa { 2534ca74823cSJiri Olsa return __bpf_kprobe_multi_cookie_cmp(a, b); 2535ca74823cSJiri Olsa } 2536ca74823cSJiri Olsa 2537f7098690SJiri Olsa static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2538ca74823cSJiri Olsa { 2539f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx *run_ctx; 2540ca74823cSJiri Olsa struct bpf_kprobe_multi_link *link; 2541f7098690SJiri Olsa u64 *cookie, entry_ip; 2542ca74823cSJiri Olsa unsigned long *addr; 2543ca74823cSJiri Olsa 2544ca74823cSJiri Olsa if (WARN_ON_ONCE(!ctx)) 2545ca74823cSJiri Olsa return 0; 2546f7098690SJiri Olsa run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2547f7098690SJiri Olsa link = run_ctx->link; 2548ca74823cSJiri Olsa if (!link->cookies) 2549ca74823cSJiri Olsa return 0; 2550f7098690SJiri Olsa entry_ip = run_ctx->entry_ip; 2551f7098690SJiri Olsa addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), 2552ca74823cSJiri Olsa __bpf_kprobe_multi_cookie_cmp); 2553ca74823cSJiri Olsa if (!addr) 2554ca74823cSJiri Olsa return 0; 2555ca74823cSJiri Olsa cookie = link->cookies + (addr - link->addrs); 2556ca74823cSJiri Olsa return *cookie; 2557ca74823cSJiri Olsa } 2558ca74823cSJiri Olsa 2559f7098690SJiri Olsa static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2560f7098690SJiri Olsa { 2561f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx *run_ctx; 2562f7098690SJiri Olsa 2563f7098690SJiri Olsa run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2564f7098690SJiri Olsa return run_ctx->entry_ip; 2565f7098690SJiri Olsa } 2566f7098690SJiri Olsa 25670dcac272SJiri Olsa static int 25680dcac272SJiri Olsa kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, 2569f7098690SJiri Olsa unsigned long entry_ip, struct pt_regs *regs) 25700dcac272SJiri Olsa { 2571f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx run_ctx = { 2572f7098690SJiri Olsa .link = link, 2573f7098690SJiri Olsa .entry_ip = entry_ip, 2574f7098690SJiri Olsa }; 2575ca74823cSJiri Olsa struct bpf_run_ctx *old_run_ctx; 25760dcac272SJiri Olsa int err; 25770dcac272SJiri Olsa 25780dcac272SJiri Olsa if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 25790dcac272SJiri Olsa err = 0; 25800dcac272SJiri Olsa goto out; 25810dcac272SJiri Olsa } 25820dcac272SJiri Olsa 25830dcac272SJiri Olsa migrate_disable(); 25840dcac272SJiri Olsa rcu_read_lock(); 2585f7098690SJiri Olsa old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 25860dcac272SJiri Olsa err = bpf_prog_run(link->link.prog, regs); 2587ca74823cSJiri Olsa bpf_reset_run_ctx(old_run_ctx); 25880dcac272SJiri Olsa rcu_read_unlock(); 25890dcac272SJiri Olsa migrate_enable(); 25900dcac272SJiri Olsa 25910dcac272SJiri Olsa out: 25920dcac272SJiri Olsa __this_cpu_dec(bpf_prog_active); 25930dcac272SJiri Olsa return err; 25940dcac272SJiri Olsa } 25950dcac272SJiri Olsa 25960dcac272SJiri Olsa static void 25970dcac272SJiri Olsa kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip, 25980dcac272SJiri Olsa struct pt_regs *regs) 25990dcac272SJiri Olsa { 26000dcac272SJiri Olsa struct bpf_kprobe_multi_link *link; 26010dcac272SJiri Olsa 26020dcac272SJiri Olsa link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2603f7098690SJiri Olsa kprobe_multi_link_prog_run(link, entry_ip, regs); 26040dcac272SJiri Olsa } 26050dcac272SJiri Olsa 2606eb5fb032SJiri Olsa static int 
symbols_cmp_r(const void *a, const void *b, const void *priv)
26070dcac272SJiri Olsa {
26080236fec5SJiri Olsa 	const char **str_a = (const char **) a;
26090236fec5SJiri Olsa 	const char **str_b = (const char **) b;
26100dcac272SJiri Olsa 
26110236fec5SJiri Olsa 	return strcmp(*str_a, *str_b);
26120dcac272SJiri Olsa }
26130dcac272SJiri Olsa 
2614eb5fb032SJiri Olsa struct multi_symbols_sort {
2615eb5fb032SJiri Olsa 	const char **funcs;
2616eb5fb032SJiri Olsa 	u64 *cookies;
2617eb5fb032SJiri Olsa };
2618eb5fb032SJiri Olsa 
2619eb5fb032SJiri Olsa static void symbols_swap_r(void *a, void *b, int size, const void *priv)
2620eb5fb032SJiri Olsa {
2621eb5fb032SJiri Olsa 	const struct multi_symbols_sort *data = priv;
2622eb5fb032SJiri Olsa 	const char **name_a = a, **name_b = b;
2623eb5fb032SJiri Olsa 
2624eb5fb032SJiri Olsa 	swap(*name_a, *name_b);
2625eb5fb032SJiri Olsa 
2626eb5fb032SJiri Olsa 	/* If cookies were provided, swap the corresponding cookies as well. */
2627eb5fb032SJiri Olsa 	if (data->cookies) {
2628eb5fb032SJiri Olsa 		u64 *cookie_a, *cookie_b;
2629eb5fb032SJiri Olsa 
2630eb5fb032SJiri Olsa 		cookie_a = data->cookies + (name_a - data->funcs);
2631eb5fb032SJiri Olsa 		cookie_b = data->cookies + (name_b - data->funcs);
2632eb5fb032SJiri Olsa 		swap(*cookie_a, *cookie_b);
2633eb5fb032SJiri Olsa 	}
2634eb5fb032SJiri Olsa }
2635eb5fb032SJiri Olsa 
26360dcac272SJiri Olsa int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
26370dcac272SJiri Olsa {
26380dcac272SJiri Olsa 	struct bpf_kprobe_multi_link *link = NULL;
26390dcac272SJiri Olsa 	struct bpf_link_primer link_primer;
2640ca74823cSJiri Olsa 	void __user *ucookies;
26410dcac272SJiri Olsa 	unsigned long *addrs;
26420dcac272SJiri Olsa 	u32 flags, cnt, size;
26430dcac272SJiri Olsa 	void __user *uaddrs;
2644ca74823cSJiri Olsa 	u64 *cookies = NULL;
26450dcac272SJiri Olsa 	void __user *usyms;
26460dcac272SJiri Olsa 	int err;
26470dcac272SJiri Olsa 
26480dcac272SJiri Olsa 	/* no support for 32bit archs yet */
26490dcac272SJiri Olsa 	if (sizeof(u64) != sizeof(void *))
26500dcac272SJiri Olsa 		return -EOPNOTSUPP;
26510dcac272SJiri Olsa 
26520dcac272SJiri Olsa 	if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI)
26530dcac272SJiri Olsa 		return -EINVAL;
26540dcac272SJiri Olsa 
26550dcac272SJiri Olsa 	flags = attr->link_create.kprobe_multi.flags;
26560dcac272SJiri Olsa 	if (flags & ~BPF_F_KPROBE_MULTI_RETURN)
26570dcac272SJiri Olsa 		return -EINVAL;
26580dcac272SJiri Olsa 
26590dcac272SJiri Olsa 	uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs);
26600dcac272SJiri Olsa 	usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms);
26610dcac272SJiri Olsa 	if (!!uaddrs == !!usyms)
26620dcac272SJiri Olsa 		return -EINVAL;
26630dcac272SJiri Olsa 
26640dcac272SJiri Olsa 	cnt = attr->link_create.kprobe_multi.cnt;
26650dcac272SJiri Olsa 	if (!cnt)
26660dcac272SJiri Olsa 		return -EINVAL;
26670dcac272SJiri Olsa 
26680dcac272SJiri Olsa 	size = cnt * sizeof(*addrs);
2669fd58f7dfSDan Carpenter 	addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
26700dcac272SJiri Olsa 	if (!addrs)
26710dcac272SJiri Olsa 		return -ENOMEM;
26720dcac272SJiri Olsa 
2673ca74823cSJiri Olsa 	ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
2674ca74823cSJiri Olsa 	if (ucookies) {
2675fd58f7dfSDan Carpenter 		cookies = kvmalloc_array(cnt, sizeof(*cookies), GFP_KERNEL);
2676ca74823cSJiri Olsa 		if (!cookies) {
2677ca74823cSJiri Olsa 			err = -ENOMEM;
2678ca74823cSJiri Olsa 			goto error;
2679ca74823cSJiri Olsa 		}
2680ca74823cSJiri Olsa 		if (copy_from_user(cookies, ucookies, size)) {
2681ca74823cSJiri Olsa 			err = -EFAULT;
2682ca74823cSJiri Olsa goto error; 2683ca74823cSJiri Olsa } 2684ca74823cSJiri Olsa } 2685ca74823cSJiri Olsa 2686eb5fb032SJiri Olsa if (uaddrs) { 2687eb5fb032SJiri Olsa if (copy_from_user(addrs, uaddrs, size)) { 2688eb5fb032SJiri Olsa err = -EFAULT; 2689eb5fb032SJiri Olsa goto error; 2690eb5fb032SJiri Olsa } 2691eb5fb032SJiri Olsa } else { 2692eb5fb032SJiri Olsa struct multi_symbols_sort data = { 2693eb5fb032SJiri Olsa .cookies = cookies, 2694eb5fb032SJiri Olsa }; 2695eb5fb032SJiri Olsa struct user_syms us; 2696eb5fb032SJiri Olsa 2697eb5fb032SJiri Olsa err = copy_user_syms(&us, usyms, cnt); 2698eb5fb032SJiri Olsa if (err) 2699eb5fb032SJiri Olsa goto error; 2700eb5fb032SJiri Olsa 2701eb5fb032SJiri Olsa if (cookies) 2702eb5fb032SJiri Olsa data.funcs = us.syms; 2703eb5fb032SJiri Olsa 2704eb5fb032SJiri Olsa sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, 2705eb5fb032SJiri Olsa symbols_swap_r, &data); 2706eb5fb032SJiri Olsa 2707eb5fb032SJiri Olsa err = ftrace_lookup_symbols(us.syms, cnt, addrs); 2708eb5fb032SJiri Olsa free_user_syms(&us); 2709eb5fb032SJiri Olsa if (err) 2710eb5fb032SJiri Olsa goto error; 2711eb5fb032SJiri Olsa } 2712eb5fb032SJiri Olsa 27130dcac272SJiri Olsa link = kzalloc(sizeof(*link), GFP_KERNEL); 27140dcac272SJiri Olsa if (!link) { 27150dcac272SJiri Olsa err = -ENOMEM; 27160dcac272SJiri Olsa goto error; 27170dcac272SJiri Olsa } 27180dcac272SJiri Olsa 27190dcac272SJiri Olsa bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 27200dcac272SJiri Olsa &bpf_kprobe_multi_link_lops, prog); 27210dcac272SJiri Olsa 27220dcac272SJiri Olsa err = bpf_link_prime(&link->link, &link_primer); 27230dcac272SJiri Olsa if (err) 27240dcac272SJiri Olsa goto error; 27250dcac272SJiri Olsa 27260dcac272SJiri Olsa if (flags & BPF_F_KPROBE_MULTI_RETURN) 27270dcac272SJiri Olsa link->fp.exit_handler = kprobe_multi_link_handler; 27280dcac272SJiri Olsa else 27290dcac272SJiri Olsa link->fp.entry_handler = kprobe_multi_link_handler; 27300dcac272SJiri Olsa 27310dcac272SJiri Olsa link->addrs = addrs; 2732ca74823cSJiri Olsa link->cookies = cookies; 2733ca74823cSJiri Olsa link->cnt = cnt; 2734ca74823cSJiri Olsa 2735ca74823cSJiri Olsa if (cookies) { 2736ca74823cSJiri Olsa /* 2737ca74823cSJiri Olsa * Sorting addresses will trigger sorting cookies as well 2738ca74823cSJiri Olsa * (check bpf_kprobe_multi_cookie_swap). This way we can 2739ca74823cSJiri Olsa * find cookie based on the address in bpf_get_attach_cookie 2740ca74823cSJiri Olsa * helper. 
2741ca74823cSJiri Olsa */ 2742ca74823cSJiri Olsa sort_r(addrs, cnt, sizeof(*addrs), 2743ca74823cSJiri Olsa bpf_kprobe_multi_cookie_cmp, 2744ca74823cSJiri Olsa bpf_kprobe_multi_cookie_swap, 2745ca74823cSJiri Olsa link); 2746ca74823cSJiri Olsa } 27470dcac272SJiri Olsa 27480dcac272SJiri Olsa err = register_fprobe_ips(&link->fp, addrs, cnt); 27490dcac272SJiri Olsa if (err) { 27500dcac272SJiri Olsa bpf_link_cleanup(&link_primer); 27510dcac272SJiri Olsa return err; 27520dcac272SJiri Olsa } 27530dcac272SJiri Olsa 27540dcac272SJiri Olsa return bpf_link_settle(&link_primer); 27550dcac272SJiri Olsa 27560dcac272SJiri Olsa error: 27570dcac272SJiri Olsa kfree(link); 27580dcac272SJiri Olsa kvfree(addrs); 2759ca74823cSJiri Olsa kvfree(cookies); 27600dcac272SJiri Olsa return err; 27610dcac272SJiri Olsa } 27620dcac272SJiri Olsa #else /* !CONFIG_FPROBE */ 27630dcac272SJiri Olsa int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 27640dcac272SJiri Olsa { 27650dcac272SJiri Olsa return -EOPNOTSUPP; 27660dcac272SJiri Olsa } 2767f7098690SJiri Olsa static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2768f7098690SJiri Olsa { 2769f7098690SJiri Olsa return 0; 2770f7098690SJiri Olsa } 2771f7098690SJiri Olsa static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2772ca74823cSJiri Olsa { 2773ca74823cSJiri Olsa return 0; 2774ca74823cSJiri Olsa } 27750dcac272SJiri Olsa #endif 2776
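/*
 * Usage sketch for the kprobe-multi link above (illustrative only;
 * userspace, via libbpf -- program and symbol names are made up):
 *
 *	// BPF object:
 *	SEC("kprobe.multi")
 *	int probe(struct pt_regs *ctx)
 *	{
 *		bpf_printk("ip %lx cookie %llu",
 *			   bpf_get_func_ip(ctx),
 *			   bpf_get_attach_cookie(ctx));
 *		return 0;
 *	}
 *
 *	// Loader, attaching two symbols with per-symbol cookies:
 *	const char *syms[] = { "tcp_connect", "tcp_close" };
 *	__u64 cookies[] = { 1, 2 };
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
 *		    .syms = syms, .cookies = cookies, .cnt = 2);
 *	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
 *
 * The cookies ride along with their symbols/addresses through the sort_r()
 * calls above, which is what lets bpf_kprobe_multi_cookie() bsearch() the
 * sorted addrs[] and index cookies[] with the matching slot.
 */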