1179a0cc4SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0 22541517cSAlexei Starovoitov /* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com 30515e599SAlexei Starovoitov * Copyright (c) 2016 Facebook 42541517cSAlexei Starovoitov */ 52541517cSAlexei Starovoitov #include <linux/kernel.h> 62541517cSAlexei Starovoitov #include <linux/types.h> 72541517cSAlexei Starovoitov #include <linux/slab.h> 82541517cSAlexei Starovoitov #include <linux/bpf.h> 94279adb0SMartin KaFai Lau #include <linux/bpf_verifier.h> 100515e599SAlexei Starovoitov #include <linux/bpf_perf_event.h> 11c4d0bfb4SAlan Maguire #include <linux/btf.h> 122541517cSAlexei Starovoitov #include <linux/filter.h> 132541517cSAlexei Starovoitov #include <linux/uaccess.h> 149c959c86SAlexei Starovoitov #include <linux/ctype.h> 159802d865SJosef Bacik #include <linux/kprobes.h> 16ac5a72eaSAlan Maguire #include <linux/spinlock.h> 1741bdc4b4SYonghong Song #include <linux/syscalls.h> 18540adea3SMasami Hiramatsu #include <linux/error-injection.h> 19c9a0f3b8SJiri Olsa #include <linux/btf_ids.h> 206f100640SKP Singh #include <linux/bpf_lsm.h> 210dcac272SJiri Olsa #include <linux/fprobe.h> 22ca74823cSJiri Olsa #include <linux/bsearch.h> 23ca74823cSJiri Olsa #include <linux/sort.h> 24f3cf4134SRoberto Sassu #include <linux/key.h> 25f3cf4134SRoberto Sassu #include <linux/verification.h> 266f100640SKP Singh 278e4597c6SMartin KaFai Lau #include <net/bpf_sk_storage.h> 289802d865SJosef Bacik 29c4d0bfb4SAlan Maguire #include <uapi/linux/bpf.h> 30c4d0bfb4SAlan Maguire #include <uapi/linux/btf.h> 31c4d0bfb4SAlan Maguire 32c7b6f29bSNadav Amit #include <asm/tlb.h> 33c7b6f29bSNadav Amit 349802d865SJosef Bacik #include "trace_probe.h" 352541517cSAlexei Starovoitov #include "trace.h" 362541517cSAlexei Starovoitov 37ac5a72eaSAlan Maguire #define CREATE_TRACE_POINTS 38ac5a72eaSAlan Maguire #include "bpf_trace.h" 39ac5a72eaSAlan Maguire 40e672db03SStanislav Fomichev #define bpf_event_rcu_dereference(p) \ 41e672db03SStanislav Fomichev rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) 42e672db03SStanislav Fomichev 43a38d1107SMatt Mullins #ifdef CONFIG_MODULES 44a38d1107SMatt Mullins struct bpf_trace_module { 45a38d1107SMatt Mullins struct module *module; 46a38d1107SMatt Mullins struct list_head list; 47a38d1107SMatt Mullins }; 48a38d1107SMatt Mullins 49a38d1107SMatt Mullins static LIST_HEAD(bpf_trace_modules); 50a38d1107SMatt Mullins static DEFINE_MUTEX(bpf_module_mutex); 51a38d1107SMatt Mullins 52a38d1107SMatt Mullins static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 53a38d1107SMatt Mullins { 54a38d1107SMatt Mullins struct bpf_raw_event_map *btp, *ret = NULL; 55a38d1107SMatt Mullins struct bpf_trace_module *btm; 56a38d1107SMatt Mullins unsigned int i; 57a38d1107SMatt Mullins 58a38d1107SMatt Mullins mutex_lock(&bpf_module_mutex); 59a38d1107SMatt Mullins list_for_each_entry(btm, &bpf_trace_modules, list) { 60a38d1107SMatt Mullins for (i = 0; i < btm->module->num_bpf_raw_events; ++i) { 61a38d1107SMatt Mullins btp = &btm->module->bpf_raw_events[i]; 62a38d1107SMatt Mullins if (!strcmp(btp->tp->name, name)) { 63a38d1107SMatt Mullins if (try_module_get(btm->module)) 64a38d1107SMatt Mullins ret = btp; 65a38d1107SMatt Mullins goto out; 66a38d1107SMatt Mullins } 67a38d1107SMatt Mullins } 68a38d1107SMatt Mullins } 69a38d1107SMatt Mullins out: 70a38d1107SMatt Mullins mutex_unlock(&bpf_module_mutex); 71a38d1107SMatt Mullins return ret; 72a38d1107SMatt Mullins } 73a38d1107SMatt Mullins #else 74a38d1107SMatt Mullins static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name) 75a38d1107SMatt Mullins { 76a38d1107SMatt Mullins return NULL; 77a38d1107SMatt Mullins } 78a38d1107SMatt Mullins #endif /* CONFIG_MODULES */ 79a38d1107SMatt Mullins 80035226b9SGianluca Borello u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 81c195651eSYonghong Song u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 82035226b9SGianluca Borello 83eb411377SAlan Maguire static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 84eb411377SAlan Maguire u64 flags, const struct btf **btf, 85eb411377SAlan Maguire s32 *btf_id); 86f7098690SJiri Olsa static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx); 87f7098690SJiri Olsa static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx); 88eb411377SAlan Maguire 892541517cSAlexei Starovoitov /** 902541517cSAlexei Starovoitov * trace_call_bpf - invoke BPF program 91e87c6bc3SYonghong Song * @call: tracepoint event 922541517cSAlexei Starovoitov * @ctx: opaque context pointer 932541517cSAlexei Starovoitov * 942541517cSAlexei Starovoitov * kprobe handlers execute BPF programs via this helper. 952541517cSAlexei Starovoitov * Can be used from static tracepoints in the future. 962541517cSAlexei Starovoitov * 972541517cSAlexei Starovoitov * Return: BPF programs always return an integer which is interpreted by 982541517cSAlexei Starovoitov * kprobe handler as: 992541517cSAlexei Starovoitov * 0 - return from kprobe (event is filtered out) 1002541517cSAlexei Starovoitov * 1 - store kprobe event into ring buffer 1012541517cSAlexei Starovoitov * Other values are reserved and currently alias to 1 1022541517cSAlexei Starovoitov */ 103e87c6bc3SYonghong Song unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) 1042541517cSAlexei Starovoitov { 1052541517cSAlexei Starovoitov unsigned int ret; 1062541517cSAlexei Starovoitov 107b0a81b94SThomas Gleixner cant_sleep(); 1082541517cSAlexei Starovoitov 1092541517cSAlexei Starovoitov if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 1102541517cSAlexei Starovoitov /* 1112541517cSAlexei Starovoitov * since some bpf program is already running on this cpu, 1122541517cSAlexei Starovoitov * don't call into another bpf program (same or different) 1132541517cSAlexei Starovoitov * and don't send kprobe event into ring-buffer, 1142541517cSAlexei Starovoitov * so return zero here 1152541517cSAlexei Starovoitov */ 1162541517cSAlexei Starovoitov ret = 0; 1172541517cSAlexei Starovoitov goto out; 1182541517cSAlexei Starovoitov } 1192541517cSAlexei Starovoitov 120e87c6bc3SYonghong Song /* 121e87c6bc3SYonghong Song * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock 122e87c6bc3SYonghong Song * to all call sites, we did a bpf_prog_array_valid() there to check 123e87c6bc3SYonghong Song * whether call->prog_array is empty or not, which is 1242b5894ccSQiujun Huang * a heuristic to speed up execution. 125e87c6bc3SYonghong Song * 126e87c6bc3SYonghong Song * If bpf_prog_array_valid() fetched prog_array was 127e87c6bc3SYonghong Song * non-NULL, we go into trace_call_bpf() and do the actual 128e87c6bc3SYonghong Song * proper rcu_dereference() under RCU lock. 129e87c6bc3SYonghong Song * If it turns out that prog_array is NULL then, we bail out. 130e87c6bc3SYonghong Song * For the opposite, if the bpf_prog_array_valid() fetched pointer 131e87c6bc3SYonghong Song * was NULL, you'll skip the prog_array with the risk of missing 132e87c6bc3SYonghong Song * out of events when it was updated in between this and the 133e87c6bc3SYonghong Song * rcu_dereference() which is accepted risk. 134e87c6bc3SYonghong Song */ 135055eb955SStanislav Fomichev rcu_read_lock(); 136055eb955SStanislav Fomichev ret = bpf_prog_run_array(rcu_dereference(call->prog_array), 137055eb955SStanislav Fomichev ctx, bpf_prog_run); 138055eb955SStanislav Fomichev rcu_read_unlock(); 1392541517cSAlexei Starovoitov 1402541517cSAlexei Starovoitov out: 1412541517cSAlexei Starovoitov __this_cpu_dec(bpf_prog_active); 1422541517cSAlexei Starovoitov 1432541517cSAlexei Starovoitov return ret; 1442541517cSAlexei Starovoitov } 1452541517cSAlexei Starovoitov 1469802d865SJosef Bacik #ifdef CONFIG_BPF_KPROBE_OVERRIDE 1479802d865SJosef Bacik BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc) 1489802d865SJosef Bacik { 1499802d865SJosef Bacik regs_set_return_value(regs, rc); 150540adea3SMasami Hiramatsu override_function_with_return(regs); 1519802d865SJosef Bacik return 0; 1529802d865SJosef Bacik } 1539802d865SJosef Bacik 1549802d865SJosef Bacik static const struct bpf_func_proto bpf_override_return_proto = { 1559802d865SJosef Bacik .func = bpf_override_return, 1569802d865SJosef Bacik .gpl_only = true, 1579802d865SJosef Bacik .ret_type = RET_INTEGER, 1589802d865SJosef Bacik .arg1_type = ARG_PTR_TO_CTX, 1599802d865SJosef Bacik .arg2_type = ARG_ANYTHING, 1609802d865SJosef Bacik }; 1619802d865SJosef Bacik #endif 1629802d865SJosef Bacik 1638d92db5cSChristoph Hellwig static __always_inline int 1648d92db5cSChristoph Hellwig bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr) 1658d92db5cSChristoph Hellwig { 1668d92db5cSChristoph Hellwig int ret; 1678d92db5cSChristoph Hellwig 168c0ee37e8SChristoph Hellwig ret = copy_from_user_nofault(dst, unsafe_ptr, size); 1698d92db5cSChristoph Hellwig if (unlikely(ret < 0)) 1708d92db5cSChristoph Hellwig memset(dst, 0, size); 1718d92db5cSChristoph Hellwig return ret; 1728d92db5cSChristoph Hellwig } 1738d92db5cSChristoph Hellwig 1746ae08ae3SDaniel Borkmann BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, 1756ae08ae3SDaniel Borkmann const void __user *, unsafe_ptr) 1762541517cSAlexei Starovoitov { 1778d92db5cSChristoph Hellwig return bpf_probe_read_user_common(dst, size, unsafe_ptr); 1782541517cSAlexei Starovoitov } 1792541517cSAlexei Starovoitov 180f470378cSJohn Fastabend const struct bpf_func_proto bpf_probe_read_user_proto = { 1816ae08ae3SDaniel Borkmann .func = bpf_probe_read_user, 1826ae08ae3SDaniel Borkmann .gpl_only = true, 1836ae08ae3SDaniel Borkmann .ret_type = RET_INTEGER, 1846ae08ae3SDaniel Borkmann .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1856ae08ae3SDaniel Borkmann .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1866ae08ae3SDaniel Borkmann .arg3_type = ARG_ANYTHING, 1876ae08ae3SDaniel Borkmann }; 1886ae08ae3SDaniel Borkmann 1898d92db5cSChristoph Hellwig static __always_inline int 1908d92db5cSChristoph Hellwig bpf_probe_read_user_str_common(void *dst, u32 size, 1918d92db5cSChristoph Hellwig const void __user *unsafe_ptr) 1928d92db5cSChristoph Hellwig { 1938d92db5cSChristoph Hellwig int ret; 1948d92db5cSChristoph Hellwig 1956fa6d280SDaniel Xu /* 1966fa6d280SDaniel Xu * NB: We rely on strncpy_from_user() not copying junk past the NUL 1976fa6d280SDaniel Xu * terminator into `dst`. 1986fa6d280SDaniel Xu * 1996fa6d280SDaniel Xu * strncpy_from_user() does long-sized strides in the fast path. If the 2006fa6d280SDaniel Xu * strncpy does not mask out the bytes after the NUL in `unsafe_ptr`, 2016fa6d280SDaniel Xu * then there could be junk after the NUL in `dst`. If user takes `dst` 2026fa6d280SDaniel Xu * and keys a hash map with it, then semantically identical strings can 2036fa6d280SDaniel Xu * occupy multiple entries in the map. 2046fa6d280SDaniel Xu */ 2058d92db5cSChristoph Hellwig ret = strncpy_from_user_nofault(dst, unsafe_ptr, size); 2068d92db5cSChristoph Hellwig if (unlikely(ret < 0)) 2078d92db5cSChristoph Hellwig memset(dst, 0, size); 2088d92db5cSChristoph Hellwig return ret; 2098d92db5cSChristoph Hellwig } 2108d92db5cSChristoph Hellwig 2116ae08ae3SDaniel Borkmann BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, 2126ae08ae3SDaniel Borkmann const void __user *, unsafe_ptr) 2136ae08ae3SDaniel Borkmann { 2148d92db5cSChristoph Hellwig return bpf_probe_read_user_str_common(dst, size, unsafe_ptr); 2156ae08ae3SDaniel Borkmann } 2166ae08ae3SDaniel Borkmann 217f470378cSJohn Fastabend const struct bpf_func_proto bpf_probe_read_user_str_proto = { 2186ae08ae3SDaniel Borkmann .func = bpf_probe_read_user_str, 2196ae08ae3SDaniel Borkmann .gpl_only = true, 2206ae08ae3SDaniel Borkmann .ret_type = RET_INTEGER, 2216ae08ae3SDaniel Borkmann .arg1_type = ARG_PTR_TO_UNINIT_MEM, 2226ae08ae3SDaniel Borkmann .arg2_type = ARG_CONST_SIZE_OR_ZERO, 2236ae08ae3SDaniel Borkmann .arg3_type = ARG_ANYTHING, 2246ae08ae3SDaniel Borkmann }; 2256ae08ae3SDaniel Borkmann 2266ae08ae3SDaniel Borkmann static __always_inline int 2278d92db5cSChristoph Hellwig bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) 2286ae08ae3SDaniel Borkmann { 229ff40e510SDaniel Borkmann int ret; 2306ae08ae3SDaniel Borkmann 231fe557319SChristoph Hellwig ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); 2326ae08ae3SDaniel Borkmann if (unlikely(ret < 0)) 2336ae08ae3SDaniel Borkmann memset(dst, 0, size); 2346ae08ae3SDaniel Borkmann return ret; 2356ae08ae3SDaniel Borkmann } 2366ae08ae3SDaniel Borkmann 2376ae08ae3SDaniel Borkmann BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, 2386ae08ae3SDaniel Borkmann const void *, unsafe_ptr) 2396ae08ae3SDaniel Borkmann { 2408d92db5cSChristoph Hellwig return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 2416ae08ae3SDaniel Borkmann } 2426ae08ae3SDaniel Borkmann 243f470378cSJohn Fastabend const struct bpf_func_proto bpf_probe_read_kernel_proto = { 2446ae08ae3SDaniel Borkmann .func = bpf_probe_read_kernel, 2456ae08ae3SDaniel Borkmann .gpl_only = true, 2466ae08ae3SDaniel Borkmann .ret_type = RET_INTEGER, 2476ae08ae3SDaniel Borkmann .arg1_type = ARG_PTR_TO_UNINIT_MEM, 2486ae08ae3SDaniel Borkmann .arg2_type = ARG_CONST_SIZE_OR_ZERO, 2496ae08ae3SDaniel Borkmann .arg3_type = ARG_ANYTHING, 2506ae08ae3SDaniel Borkmann }; 2516ae08ae3SDaniel Borkmann 2526ae08ae3SDaniel Borkmann static __always_inline int 2538d92db5cSChristoph Hellwig bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) 2546ae08ae3SDaniel Borkmann { 255ff40e510SDaniel Borkmann int ret; 2568d92db5cSChristoph Hellwig 2576ae08ae3SDaniel Borkmann /* 2588d92db5cSChristoph Hellwig * The strncpy_from_kernel_nofault() call will likely not fill the 2598d92db5cSChristoph Hellwig * entire buffer, but that's okay in this circumstance as we're probing 2606ae08ae3SDaniel Borkmann * arbitrary memory anyway similar to bpf_probe_read_*() and might 2616ae08ae3SDaniel Borkmann * as well probe the stack. Thus, memory is explicitly cleared 2626ae08ae3SDaniel Borkmann * only in error case, so that improper users ignoring return 2636ae08ae3SDaniel Borkmann * code altogether don't copy garbage; otherwise length of string 2646ae08ae3SDaniel Borkmann * is returned that can be used for bpf_perf_event_output() et al. 2656ae08ae3SDaniel Borkmann */ 2668d92db5cSChristoph Hellwig ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); 2676ae08ae3SDaniel Borkmann if (unlikely(ret < 0)) 2686ae08ae3SDaniel Borkmann memset(dst, 0, size); 2696ae08ae3SDaniel Borkmann return ret; 2706ae08ae3SDaniel Borkmann } 2716ae08ae3SDaniel Borkmann 2726ae08ae3SDaniel Borkmann BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, 2736ae08ae3SDaniel Borkmann const void *, unsafe_ptr) 2746ae08ae3SDaniel Borkmann { 2758d92db5cSChristoph Hellwig return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 2766ae08ae3SDaniel Borkmann } 2776ae08ae3SDaniel Borkmann 278f470378cSJohn Fastabend const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { 2796ae08ae3SDaniel Borkmann .func = bpf_probe_read_kernel_str, 2806ae08ae3SDaniel Borkmann .gpl_only = true, 2816ae08ae3SDaniel Borkmann .ret_type = RET_INTEGER, 2826ae08ae3SDaniel Borkmann .arg1_type = ARG_PTR_TO_UNINIT_MEM, 2836ae08ae3SDaniel Borkmann .arg2_type = ARG_CONST_SIZE_OR_ZERO, 2846ae08ae3SDaniel Borkmann .arg3_type = ARG_ANYTHING, 2856ae08ae3SDaniel Borkmann }; 2866ae08ae3SDaniel Borkmann 2878d92db5cSChristoph Hellwig #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 2888d92db5cSChristoph Hellwig BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, 2898d92db5cSChristoph Hellwig const void *, unsafe_ptr) 2908d92db5cSChristoph Hellwig { 2918d92db5cSChristoph Hellwig if ((unsigned long)unsafe_ptr < TASK_SIZE) { 2928d92db5cSChristoph Hellwig return bpf_probe_read_user_common(dst, size, 2938d92db5cSChristoph Hellwig (__force void __user *)unsafe_ptr); 2948d92db5cSChristoph Hellwig } 2958d92db5cSChristoph Hellwig return bpf_probe_read_kernel_common(dst, size, unsafe_ptr); 2968d92db5cSChristoph Hellwig } 2978d92db5cSChristoph Hellwig 2988d92db5cSChristoph Hellwig static const struct bpf_func_proto bpf_probe_read_compat_proto = { 2998d92db5cSChristoph Hellwig .func = bpf_probe_read_compat, 3008d92db5cSChristoph Hellwig .gpl_only = true, 3018d92db5cSChristoph Hellwig .ret_type = RET_INTEGER, 3028d92db5cSChristoph Hellwig .arg1_type = ARG_PTR_TO_UNINIT_MEM, 3038d92db5cSChristoph Hellwig .arg2_type = ARG_CONST_SIZE_OR_ZERO, 3048d92db5cSChristoph Hellwig .arg3_type = ARG_ANYTHING, 3058d92db5cSChristoph Hellwig }; 3068d92db5cSChristoph Hellwig 3076ae08ae3SDaniel Borkmann BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, 3086ae08ae3SDaniel Borkmann const void *, unsafe_ptr) 3096ae08ae3SDaniel Borkmann { 3108d92db5cSChristoph Hellwig if ((unsigned long)unsafe_ptr < TASK_SIZE) { 3118d92db5cSChristoph Hellwig return bpf_probe_read_user_str_common(dst, size, 3128d92db5cSChristoph Hellwig (__force void __user *)unsafe_ptr); 3138d92db5cSChristoph Hellwig } 3148d92db5cSChristoph Hellwig return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr); 3156ae08ae3SDaniel Borkmann } 3166ae08ae3SDaniel Borkmann 3176ae08ae3SDaniel Borkmann static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { 3186ae08ae3SDaniel Borkmann .func = bpf_probe_read_compat_str, 3192541517cSAlexei Starovoitov .gpl_only = true, 3202541517cSAlexei Starovoitov .ret_type = RET_INTEGER, 32139f19ebbSAlexei Starovoitov .arg1_type = ARG_PTR_TO_UNINIT_MEM, 3229c019e2bSYonghong Song .arg2_type = ARG_CONST_SIZE_OR_ZERO, 3232541517cSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 3242541517cSAlexei Starovoitov }; 3258d92db5cSChristoph Hellwig #endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ 3262541517cSAlexei Starovoitov 327eb1b6688SDaniel Borkmann BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, 328f3694e00SDaniel Borkmann u32, size) 32996ae5227SSargun Dhillon { 33096ae5227SSargun Dhillon /* 33196ae5227SSargun Dhillon * Ensure we're in user context which is safe for the helper to 33296ae5227SSargun Dhillon * run. This helper has no business in a kthread. 33396ae5227SSargun Dhillon * 33496ae5227SSargun Dhillon * access_ok() should prevent writing to non-user memory, but in 33596ae5227SSargun Dhillon * some situations (nommu, temporary switch, etc) access_ok() does 33696ae5227SSargun Dhillon * not provide enough validation, hence the check on KERNEL_DS. 337c7b6f29bSNadav Amit * 338c7b6f29bSNadav Amit * nmi_uaccess_okay() ensures the probe is not run in an interim 339c7b6f29bSNadav Amit * state, when the task or mm are switched. This is specifically 340c7b6f29bSNadav Amit * required to prevent the use of temporary mm. 34196ae5227SSargun Dhillon */ 34296ae5227SSargun Dhillon 34396ae5227SSargun Dhillon if (unlikely(in_interrupt() || 34496ae5227SSargun Dhillon current->flags & (PF_KTHREAD | PF_EXITING))) 34596ae5227SSargun Dhillon return -EPERM; 346c7b6f29bSNadav Amit if (unlikely(!nmi_uaccess_okay())) 347c7b6f29bSNadav Amit return -EPERM; 34896ae5227SSargun Dhillon 349c0ee37e8SChristoph Hellwig return copy_to_user_nofault(unsafe_ptr, src, size); 35096ae5227SSargun Dhillon } 35196ae5227SSargun Dhillon 35296ae5227SSargun Dhillon static const struct bpf_func_proto bpf_probe_write_user_proto = { 35396ae5227SSargun Dhillon .func = bpf_probe_write_user, 35496ae5227SSargun Dhillon .gpl_only = true, 35596ae5227SSargun Dhillon .ret_type = RET_INTEGER, 35696ae5227SSargun Dhillon .arg1_type = ARG_ANYTHING, 357216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 35839f19ebbSAlexei Starovoitov .arg3_type = ARG_CONST_SIZE, 35996ae5227SSargun Dhillon }; 36096ae5227SSargun Dhillon 36196ae5227SSargun Dhillon static const struct bpf_func_proto *bpf_get_probe_write_proto(void) 36296ae5227SSargun Dhillon { 3632c78ee89SAlexei Starovoitov if (!capable(CAP_SYS_ADMIN)) 3642c78ee89SAlexei Starovoitov return NULL; 3652c78ee89SAlexei Starovoitov 36696ae5227SSargun Dhillon pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!", 36796ae5227SSargun Dhillon current->comm, task_pid_nr(current)); 36896ae5227SSargun Dhillon 36996ae5227SSargun Dhillon return &bpf_probe_write_user_proto; 37096ae5227SSargun Dhillon } 37196ae5227SSargun Dhillon 372ac5a72eaSAlan Maguire static DEFINE_RAW_SPINLOCK(trace_printk_lock); 373ac5a72eaSAlan Maguire 374d9c9e4dbSFlorent Revest #define MAX_TRACE_PRINTK_VARARGS 3 375ac5a72eaSAlan Maguire #define BPF_TRACE_PRINTK_SIZE 1024 376ac5a72eaSAlan Maguire 377f3694e00SDaniel Borkmann BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, 378f3694e00SDaniel Borkmann u64, arg2, u64, arg3) 3799c959c86SAlexei Starovoitov { 380d9c9e4dbSFlorent Revest u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; 38148cac3f4SFlorent Revest u32 *bin_args; 382d9c9e4dbSFlorent Revest static char buf[BPF_TRACE_PRINTK_SIZE]; 383d9c9e4dbSFlorent Revest unsigned long flags; 384d9c9e4dbSFlorent Revest int ret; 3859c959c86SAlexei Starovoitov 38648cac3f4SFlorent Revest ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, 387d9c9e4dbSFlorent Revest MAX_TRACE_PRINTK_VARARGS); 388d9c9e4dbSFlorent Revest if (ret < 0) 389d9c9e4dbSFlorent Revest return ret; 3909c959c86SAlexei Starovoitov 39138d26d89SFlorent Revest raw_spin_lock_irqsave(&trace_printk_lock, flags); 39248cac3f4SFlorent Revest ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); 3939c959c86SAlexei Starovoitov 394d9c9e4dbSFlorent Revest trace_bpf_trace_printk(buf); 395d9c9e4dbSFlorent Revest raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 3969c959c86SAlexei Starovoitov 39748cac3f4SFlorent Revest bpf_bprintf_cleanup(); 3989c959c86SAlexei Starovoitov 399d9c9e4dbSFlorent Revest return ret; 4009c959c86SAlexei Starovoitov } 4019c959c86SAlexei Starovoitov 4029c959c86SAlexei Starovoitov static const struct bpf_func_proto bpf_trace_printk_proto = { 4039c959c86SAlexei Starovoitov .func = bpf_trace_printk, 4049c959c86SAlexei Starovoitov .gpl_only = true, 4059c959c86SAlexei Starovoitov .ret_type = RET_INTEGER, 406216e3cd2SHao Luo .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 40739f19ebbSAlexei Starovoitov .arg2_type = ARG_CONST_SIZE, 4089c959c86SAlexei Starovoitov }; 4099c959c86SAlexei Starovoitov 41010aceb62SDave Marchevsky static void __set_printk_clr_event(void) 4110756ea3eSAlexei Starovoitov { 4120756ea3eSAlexei Starovoitov /* 413ac5a72eaSAlan Maguire * This program might be calling bpf_trace_printk, 414ac5a72eaSAlan Maguire * so enable the associated bpf_trace/bpf_trace_printk event. 415ac5a72eaSAlan Maguire * Repeat this each time as it is possible a user has 416ac5a72eaSAlan Maguire * disabled bpf_trace_printk events. By loading a program 417ac5a72eaSAlan Maguire * calling bpf_trace_printk() however the user has expressed 418ac5a72eaSAlan Maguire * the intent to see such events. 4190756ea3eSAlexei Starovoitov */ 420ac5a72eaSAlan Maguire if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) 421ac5a72eaSAlan Maguire pr_warn_ratelimited("could not enable bpf_trace_printk events"); 42210aceb62SDave Marchevsky } 4230756ea3eSAlexei Starovoitov 42410aceb62SDave Marchevsky const struct bpf_func_proto *bpf_get_trace_printk_proto(void) 42510aceb62SDave Marchevsky { 42610aceb62SDave Marchevsky __set_printk_clr_event(); 4270756ea3eSAlexei Starovoitov return &bpf_trace_printk_proto; 4280756ea3eSAlexei Starovoitov } 4290756ea3eSAlexei Starovoitov 43010aceb62SDave Marchevsky BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data, 43110aceb62SDave Marchevsky u32, data_len) 43210aceb62SDave Marchevsky { 43310aceb62SDave Marchevsky static char buf[BPF_TRACE_PRINTK_SIZE]; 43410aceb62SDave Marchevsky unsigned long flags; 43510aceb62SDave Marchevsky int ret, num_args; 43610aceb62SDave Marchevsky u32 *bin_args; 43710aceb62SDave Marchevsky 43810aceb62SDave Marchevsky if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 43910aceb62SDave Marchevsky (data_len && !data)) 44010aceb62SDave Marchevsky return -EINVAL; 44110aceb62SDave Marchevsky num_args = data_len / 8; 44210aceb62SDave Marchevsky 44310aceb62SDave Marchevsky ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); 44410aceb62SDave Marchevsky if (ret < 0) 44510aceb62SDave Marchevsky return ret; 44610aceb62SDave Marchevsky 44710aceb62SDave Marchevsky raw_spin_lock_irqsave(&trace_printk_lock, flags); 44810aceb62SDave Marchevsky ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); 44910aceb62SDave Marchevsky 45010aceb62SDave Marchevsky trace_bpf_trace_printk(buf); 45110aceb62SDave Marchevsky raw_spin_unlock_irqrestore(&trace_printk_lock, flags); 45210aceb62SDave Marchevsky 45310aceb62SDave Marchevsky bpf_bprintf_cleanup(); 45410aceb62SDave Marchevsky 45510aceb62SDave Marchevsky return ret; 45610aceb62SDave Marchevsky } 45710aceb62SDave Marchevsky 45810aceb62SDave Marchevsky static const struct bpf_func_proto bpf_trace_vprintk_proto = { 45910aceb62SDave Marchevsky .func = bpf_trace_vprintk, 46010aceb62SDave Marchevsky .gpl_only = true, 46110aceb62SDave Marchevsky .ret_type = RET_INTEGER, 462216e3cd2SHao Luo .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, 46310aceb62SDave Marchevsky .arg2_type = ARG_CONST_SIZE, 464216e3cd2SHao Luo .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 46510aceb62SDave Marchevsky .arg4_type = ARG_CONST_SIZE_OR_ZERO, 46610aceb62SDave Marchevsky }; 46710aceb62SDave Marchevsky 46810aceb62SDave Marchevsky const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) 46910aceb62SDave Marchevsky { 47010aceb62SDave Marchevsky __set_printk_clr_event(); 47110aceb62SDave Marchevsky return &bpf_trace_vprintk_proto; 47210aceb62SDave Marchevsky } 47310aceb62SDave Marchevsky 474492e639fSYonghong Song BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, 475492e639fSYonghong Song const void *, data, u32, data_len) 476492e639fSYonghong Song { 477d9c9e4dbSFlorent Revest int err, num_args; 47848cac3f4SFlorent Revest u32 *bin_args; 479492e639fSYonghong Song 480335ff499SDave Marchevsky if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || 481d9c9e4dbSFlorent Revest (data_len && !data)) 482d9c9e4dbSFlorent Revest return -EINVAL; 483492e639fSYonghong Song num_args = data_len / 8; 484492e639fSYonghong Song 48548cac3f4SFlorent Revest err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); 486492e639fSYonghong Song if (err < 0) 487d9c9e4dbSFlorent Revest return err; 488492e639fSYonghong Song 48948cac3f4SFlorent Revest seq_bprintf(m, fmt, bin_args); 490492e639fSYonghong Song 49148cac3f4SFlorent Revest bpf_bprintf_cleanup(); 492d9c9e4dbSFlorent Revest 493d9c9e4dbSFlorent Revest return seq_has_overflowed(m) ? -EOVERFLOW : 0; 494492e639fSYonghong Song } 495492e639fSYonghong Song 4969436ef6eSLorenz Bauer BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file) 497c9a0f3b8SJiri Olsa 498492e639fSYonghong Song static const struct bpf_func_proto bpf_seq_printf_proto = { 499492e639fSYonghong Song .func = bpf_seq_printf, 500492e639fSYonghong Song .gpl_only = true, 501492e639fSYonghong Song .ret_type = RET_INTEGER, 502492e639fSYonghong Song .arg1_type = ARG_PTR_TO_BTF_ID, 5039436ef6eSLorenz Bauer .arg1_btf_id = &btf_seq_file_ids[0], 504216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 505492e639fSYonghong Song .arg3_type = ARG_CONST_SIZE, 506216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, 507492e639fSYonghong Song .arg5_type = ARG_CONST_SIZE_OR_ZERO, 508492e639fSYonghong Song }; 509492e639fSYonghong Song 510492e639fSYonghong Song BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) 511492e639fSYonghong Song { 512492e639fSYonghong Song return seq_write(m, data, len) ? -EOVERFLOW : 0; 513492e639fSYonghong Song } 514492e639fSYonghong Song 515492e639fSYonghong Song static const struct bpf_func_proto bpf_seq_write_proto = { 516492e639fSYonghong Song .func = bpf_seq_write, 517492e639fSYonghong Song .gpl_only = true, 518492e639fSYonghong Song .ret_type = RET_INTEGER, 519492e639fSYonghong Song .arg1_type = ARG_PTR_TO_BTF_ID, 5209436ef6eSLorenz Bauer .arg1_btf_id = &btf_seq_file_ids[0], 521216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 522492e639fSYonghong Song .arg3_type = ARG_CONST_SIZE_OR_ZERO, 523492e639fSYonghong Song }; 524492e639fSYonghong Song 525eb411377SAlan Maguire BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr, 526eb411377SAlan Maguire u32, btf_ptr_size, u64, flags) 527eb411377SAlan Maguire { 528eb411377SAlan Maguire const struct btf *btf; 529eb411377SAlan Maguire s32 btf_id; 530eb411377SAlan Maguire int ret; 531eb411377SAlan Maguire 532eb411377SAlan Maguire ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 533eb411377SAlan Maguire if (ret) 534eb411377SAlan Maguire return ret; 535eb411377SAlan Maguire 536eb411377SAlan Maguire return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags); 537eb411377SAlan Maguire } 538eb411377SAlan Maguire 539eb411377SAlan Maguire static const struct bpf_func_proto bpf_seq_printf_btf_proto = { 540eb411377SAlan Maguire .func = bpf_seq_printf_btf, 541eb411377SAlan Maguire .gpl_only = true, 542eb411377SAlan Maguire .ret_type = RET_INTEGER, 543eb411377SAlan Maguire .arg1_type = ARG_PTR_TO_BTF_ID, 544eb411377SAlan Maguire .arg1_btf_id = &btf_seq_file_ids[0], 545216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 546eb411377SAlan Maguire .arg3_type = ARG_CONST_SIZE_OR_ZERO, 547eb411377SAlan Maguire .arg4_type = ARG_ANYTHING, 548d9847d31SAlexei Starovoitov }; 549d9847d31SAlexei Starovoitov 550908432caSYonghong Song static __always_inline int 551908432caSYonghong Song get_map_perf_counter(struct bpf_map *map, u64 flags, 552908432caSYonghong Song u64 *value, u64 *enabled, u64 *running) 55335578d79SKaixu Xia { 55435578d79SKaixu Xia struct bpf_array *array = container_of(map, struct bpf_array, map); 5556816a7ffSDaniel Borkmann unsigned int cpu = smp_processor_id(); 5566816a7ffSDaniel Borkmann u64 index = flags & BPF_F_INDEX_MASK; 5573b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 55835578d79SKaixu Xia 5596816a7ffSDaniel Borkmann if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 5606816a7ffSDaniel Borkmann return -EINVAL; 5616816a7ffSDaniel Borkmann if (index == BPF_F_CURRENT_CPU) 5626816a7ffSDaniel Borkmann index = cpu; 56335578d79SKaixu Xia if (unlikely(index >= array->map.max_entries)) 56435578d79SKaixu Xia return -E2BIG; 56535578d79SKaixu Xia 5663b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[index]); 5671ca1cc98SDaniel Borkmann if (!ee) 56835578d79SKaixu Xia return -ENOENT; 56935578d79SKaixu Xia 570908432caSYonghong Song return perf_event_read_local(ee->event, value, enabled, running); 571908432caSYonghong Song } 572908432caSYonghong Song 573908432caSYonghong Song BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) 574908432caSYonghong Song { 575908432caSYonghong Song u64 value = 0; 576908432caSYonghong Song int err; 577908432caSYonghong Song 578908432caSYonghong Song err = get_map_perf_counter(map, flags, &value, NULL, NULL); 57935578d79SKaixu Xia /* 580f91840a3SAlexei Starovoitov * this api is ugly since we miss [-22..-2] range of valid 581f91840a3SAlexei Starovoitov * counter values, but that's uapi 58235578d79SKaixu Xia */ 583f91840a3SAlexei Starovoitov if (err) 584f91840a3SAlexei Starovoitov return err; 585f91840a3SAlexei Starovoitov return value; 58635578d79SKaixu Xia } 58735578d79SKaixu Xia 58862544ce8SAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_read_proto = { 58935578d79SKaixu Xia .func = bpf_perf_event_read, 5901075ef59SAlexei Starovoitov .gpl_only = true, 59135578d79SKaixu Xia .ret_type = RET_INTEGER, 59235578d79SKaixu Xia .arg1_type = ARG_CONST_MAP_PTR, 59335578d79SKaixu Xia .arg2_type = ARG_ANYTHING, 59435578d79SKaixu Xia }; 59535578d79SKaixu Xia 596908432caSYonghong Song BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, 597908432caSYonghong Song struct bpf_perf_event_value *, buf, u32, size) 598908432caSYonghong Song { 599908432caSYonghong Song int err = -EINVAL; 600908432caSYonghong Song 601908432caSYonghong Song if (unlikely(size != sizeof(struct bpf_perf_event_value))) 602908432caSYonghong Song goto clear; 603908432caSYonghong Song err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, 604908432caSYonghong Song &buf->running); 605908432caSYonghong Song if (unlikely(err)) 606908432caSYonghong Song goto clear; 607908432caSYonghong Song return 0; 608908432caSYonghong Song clear: 609908432caSYonghong Song memset(buf, 0, size); 610908432caSYonghong Song return err; 611908432caSYonghong Song } 612908432caSYonghong Song 613908432caSYonghong Song static const struct bpf_func_proto bpf_perf_event_read_value_proto = { 614908432caSYonghong Song .func = bpf_perf_event_read_value, 615908432caSYonghong Song .gpl_only = true, 616908432caSYonghong Song .ret_type = RET_INTEGER, 617908432caSYonghong Song .arg1_type = ARG_CONST_MAP_PTR, 618908432caSYonghong Song .arg2_type = ARG_ANYTHING, 619908432caSYonghong Song .arg3_type = ARG_PTR_TO_UNINIT_MEM, 620908432caSYonghong Song .arg4_type = ARG_CONST_SIZE, 621908432caSYonghong Song }; 622908432caSYonghong Song 6238e7a3920SDaniel Borkmann static __always_inline u64 6248e7a3920SDaniel Borkmann __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 625283ca526SDaniel Borkmann u64 flags, struct perf_sample_data *sd) 626a43eec30SAlexei Starovoitov { 627a43eec30SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 628d7931330SDaniel Borkmann unsigned int cpu = smp_processor_id(); 6291e33759cSDaniel Borkmann u64 index = flags & BPF_F_INDEX_MASK; 6303b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 631a43eec30SAlexei Starovoitov struct perf_event *event; 632a43eec30SAlexei Starovoitov 6331e33759cSDaniel Borkmann if (index == BPF_F_CURRENT_CPU) 634d7931330SDaniel Borkmann index = cpu; 635a43eec30SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 636a43eec30SAlexei Starovoitov return -E2BIG; 637a43eec30SAlexei Starovoitov 6383b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[index]); 6391ca1cc98SDaniel Borkmann if (!ee) 640a43eec30SAlexei Starovoitov return -ENOENT; 641a43eec30SAlexei Starovoitov 6423b1efb19SDaniel Borkmann event = ee->event; 643a43eec30SAlexei Starovoitov if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || 644a43eec30SAlexei Starovoitov event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) 645a43eec30SAlexei Starovoitov return -EINVAL; 646a43eec30SAlexei Starovoitov 647d7931330SDaniel Borkmann if (unlikely(event->oncpu != cpu)) 648a43eec30SAlexei Starovoitov return -EOPNOTSUPP; 649a43eec30SAlexei Starovoitov 65056201969SArnaldo Carvalho de Melo return perf_event_output(event, sd, regs); 651a43eec30SAlexei Starovoitov } 652a43eec30SAlexei Starovoitov 6539594dc3cSMatt Mullins /* 6549594dc3cSMatt Mullins * Support executing tracepoints in normal, irq, and nmi context that each call 6559594dc3cSMatt Mullins * bpf_perf_event_output 6569594dc3cSMatt Mullins */ 6579594dc3cSMatt Mullins struct bpf_trace_sample_data { 6589594dc3cSMatt Mullins struct perf_sample_data sds[3]; 6599594dc3cSMatt Mullins }; 6609594dc3cSMatt Mullins 6619594dc3cSMatt Mullins static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds); 6629594dc3cSMatt Mullins static DEFINE_PER_CPU(int, bpf_trace_nest_level); 663f3694e00SDaniel Borkmann BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 664f3694e00SDaniel Borkmann u64, flags, void *, data, u64, size) 6658e7a3920SDaniel Borkmann { 6669594dc3cSMatt Mullins struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds); 6679594dc3cSMatt Mullins int nest_level = this_cpu_inc_return(bpf_trace_nest_level); 6688e7a3920SDaniel Borkmann struct perf_raw_record raw = { 6698e7a3920SDaniel Borkmann .frag = { 6708e7a3920SDaniel Borkmann .size = size, 6718e7a3920SDaniel Borkmann .data = data, 6728e7a3920SDaniel Borkmann }, 6738e7a3920SDaniel Borkmann }; 6749594dc3cSMatt Mullins struct perf_sample_data *sd; 6759594dc3cSMatt Mullins int err; 6768e7a3920SDaniel Borkmann 6779594dc3cSMatt Mullins if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) { 6789594dc3cSMatt Mullins err = -EBUSY; 6799594dc3cSMatt Mullins goto out; 6809594dc3cSMatt Mullins } 6819594dc3cSMatt Mullins 6829594dc3cSMatt Mullins sd = &sds->sds[nest_level - 1]; 6839594dc3cSMatt Mullins 6849594dc3cSMatt Mullins if (unlikely(flags & ~(BPF_F_INDEX_MASK))) { 6859594dc3cSMatt Mullins err = -EINVAL; 6869594dc3cSMatt Mullins goto out; 6879594dc3cSMatt Mullins } 6888e7a3920SDaniel Borkmann 689283ca526SDaniel Borkmann perf_sample_data_init(sd, 0, 0); 690283ca526SDaniel Borkmann sd->raw = &raw; 69121da7472SSumanth Korikkar sd->sample_flags |= PERF_SAMPLE_RAW; 692283ca526SDaniel Borkmann 6939594dc3cSMatt Mullins err = __bpf_perf_event_output(regs, map, flags, sd); 6949594dc3cSMatt Mullins 6959594dc3cSMatt Mullins out: 6969594dc3cSMatt Mullins this_cpu_dec(bpf_trace_nest_level); 6979594dc3cSMatt Mullins return err; 6988e7a3920SDaniel Borkmann } 6998e7a3920SDaniel Borkmann 700a43eec30SAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_output_proto = { 701a43eec30SAlexei Starovoitov .func = bpf_perf_event_output, 7021075ef59SAlexei Starovoitov .gpl_only = true, 703a43eec30SAlexei Starovoitov .ret_type = RET_INTEGER, 704a43eec30SAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 705a43eec30SAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 706a43eec30SAlexei Starovoitov .arg3_type = ARG_ANYTHING, 707216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 708a60dd35dSGianluca Borello .arg5_type = ARG_CONST_SIZE_OR_ZERO, 709a43eec30SAlexei Starovoitov }; 710a43eec30SAlexei Starovoitov 711768fb61fSAllan Zhang static DEFINE_PER_CPU(int, bpf_event_output_nest_level); 712768fb61fSAllan Zhang struct bpf_nested_pt_regs { 713768fb61fSAllan Zhang struct pt_regs regs[3]; 714768fb61fSAllan Zhang }; 715768fb61fSAllan Zhang static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs); 716768fb61fSAllan Zhang static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); 717bd570ff9SDaniel Borkmann 718555c8a86SDaniel Borkmann u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 719555c8a86SDaniel Borkmann void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 720bd570ff9SDaniel Borkmann { 721768fb61fSAllan Zhang int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); 722555c8a86SDaniel Borkmann struct perf_raw_frag frag = { 723555c8a86SDaniel Borkmann .copy = ctx_copy, 724555c8a86SDaniel Borkmann .size = ctx_size, 725555c8a86SDaniel Borkmann .data = ctx, 726555c8a86SDaniel Borkmann }; 727555c8a86SDaniel Borkmann struct perf_raw_record raw = { 728555c8a86SDaniel Borkmann .frag = { 729183fc153SAndrew Morton { 730555c8a86SDaniel Borkmann .next = ctx_size ? &frag : NULL, 731183fc153SAndrew Morton }, 732555c8a86SDaniel Borkmann .size = meta_size, 733555c8a86SDaniel Borkmann .data = meta, 734555c8a86SDaniel Borkmann }, 735555c8a86SDaniel Borkmann }; 736768fb61fSAllan Zhang struct perf_sample_data *sd; 737768fb61fSAllan Zhang struct pt_regs *regs; 738768fb61fSAllan Zhang u64 ret; 739768fb61fSAllan Zhang 740768fb61fSAllan Zhang if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { 741768fb61fSAllan Zhang ret = -EBUSY; 742768fb61fSAllan Zhang goto out; 743768fb61fSAllan Zhang } 744768fb61fSAllan Zhang sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]); 745768fb61fSAllan Zhang regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]); 746bd570ff9SDaniel Borkmann 747bd570ff9SDaniel Borkmann perf_fetch_caller_regs(regs); 748283ca526SDaniel Borkmann perf_sample_data_init(sd, 0, 0); 749283ca526SDaniel Borkmann sd->raw = &raw; 75021da7472SSumanth Korikkar sd->sample_flags |= PERF_SAMPLE_RAW; 751bd570ff9SDaniel Borkmann 752768fb61fSAllan Zhang ret = __bpf_perf_event_output(regs, map, flags, sd); 753768fb61fSAllan Zhang out: 754768fb61fSAllan Zhang this_cpu_dec(bpf_event_output_nest_level); 755768fb61fSAllan Zhang return ret; 756bd570ff9SDaniel Borkmann } 757bd570ff9SDaniel Borkmann 758f3694e00SDaniel Borkmann BPF_CALL_0(bpf_get_current_task) 759606274c5SAlexei Starovoitov { 760606274c5SAlexei Starovoitov return (long) current; 761606274c5SAlexei Starovoitov } 762606274c5SAlexei Starovoitov 763f470378cSJohn Fastabend const struct bpf_func_proto bpf_get_current_task_proto = { 764606274c5SAlexei Starovoitov .func = bpf_get_current_task, 765606274c5SAlexei Starovoitov .gpl_only = true, 766606274c5SAlexei Starovoitov .ret_type = RET_INTEGER, 767606274c5SAlexei Starovoitov }; 768606274c5SAlexei Starovoitov 7693ca1032aSKP Singh BPF_CALL_0(bpf_get_current_task_btf) 7703ca1032aSKP Singh { 7713ca1032aSKP Singh return (unsigned long) current; 7723ca1032aSKP Singh } 7733ca1032aSKP Singh 774a396eda5SDaniel Xu const struct bpf_func_proto bpf_get_current_task_btf_proto = { 7753ca1032aSKP Singh .func = bpf_get_current_task_btf, 7763ca1032aSKP Singh .gpl_only = true, 777*3f00c523SDavid Vernet .ret_type = RET_PTR_TO_BTF_ID_TRUSTED, 778d19ddb47SSong Liu .ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 7793ca1032aSKP Singh }; 7803ca1032aSKP Singh 781dd6e10fbSDaniel Xu BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task) 782dd6e10fbSDaniel Xu { 783dd6e10fbSDaniel Xu return (unsigned long) task_pt_regs(task); 784dd6e10fbSDaniel Xu } 785dd6e10fbSDaniel Xu 786dd6e10fbSDaniel Xu BTF_ID_LIST(bpf_task_pt_regs_ids) 787dd6e10fbSDaniel Xu BTF_ID(struct, pt_regs) 788dd6e10fbSDaniel Xu 789dd6e10fbSDaniel Xu const struct bpf_func_proto bpf_task_pt_regs_proto = { 790dd6e10fbSDaniel Xu .func = bpf_task_pt_regs, 791dd6e10fbSDaniel Xu .gpl_only = true, 792dd6e10fbSDaniel Xu .arg1_type = ARG_PTR_TO_BTF_ID, 793d19ddb47SSong Liu .arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], 794dd6e10fbSDaniel Xu .ret_type = RET_PTR_TO_BTF_ID, 795dd6e10fbSDaniel Xu .ret_btf_id = &bpf_task_pt_regs_ids[0], 796dd6e10fbSDaniel Xu }; 797dd6e10fbSDaniel Xu 798f3694e00SDaniel Borkmann BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) 79960d20f91SSargun Dhillon { 80060d20f91SSargun Dhillon struct bpf_array *array = container_of(map, struct bpf_array, map); 80160d20f91SSargun Dhillon struct cgroup *cgrp; 80260d20f91SSargun Dhillon 80360d20f91SSargun Dhillon if (unlikely(idx >= array->map.max_entries)) 80460d20f91SSargun Dhillon return -E2BIG; 80560d20f91SSargun Dhillon 80660d20f91SSargun Dhillon cgrp = READ_ONCE(array->ptrs[idx]); 80760d20f91SSargun Dhillon if (unlikely(!cgrp)) 80860d20f91SSargun Dhillon return -EAGAIN; 80960d20f91SSargun Dhillon 81060d20f91SSargun Dhillon return task_under_cgroup_hierarchy(current, cgrp); 81160d20f91SSargun Dhillon } 81260d20f91SSargun Dhillon 81360d20f91SSargun Dhillon static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { 81460d20f91SSargun Dhillon .func = bpf_current_task_under_cgroup, 81560d20f91SSargun Dhillon .gpl_only = false, 81660d20f91SSargun Dhillon .ret_type = RET_INTEGER, 81760d20f91SSargun Dhillon .arg1_type = ARG_CONST_MAP_PTR, 81860d20f91SSargun Dhillon .arg2_type = ARG_ANYTHING, 81960d20f91SSargun Dhillon }; 82060d20f91SSargun Dhillon 8218b401f9eSYonghong Song struct send_signal_irq_work { 8228b401f9eSYonghong Song struct irq_work irq_work; 8238b401f9eSYonghong Song struct task_struct *task; 8248b401f9eSYonghong Song u32 sig; 8258482941fSYonghong Song enum pid_type type; 8268b401f9eSYonghong Song }; 8278b401f9eSYonghong Song 8288b401f9eSYonghong Song static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work); 8298b401f9eSYonghong Song 8308b401f9eSYonghong Song static void do_bpf_send_signal(struct irq_work *entry) 8318b401f9eSYonghong Song { 8328b401f9eSYonghong Song struct send_signal_irq_work *work; 8338b401f9eSYonghong Song 8348b401f9eSYonghong Song work = container_of(entry, struct send_signal_irq_work, irq_work); 8358482941fSYonghong Song group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type); 8368b401f9eSYonghong Song } 8378b401f9eSYonghong Song 8388482941fSYonghong Song static int bpf_send_signal_common(u32 sig, enum pid_type type) 8398b401f9eSYonghong Song { 8408b401f9eSYonghong Song struct send_signal_irq_work *work = NULL; 8418b401f9eSYonghong Song 8428b401f9eSYonghong Song /* Similar to bpf_probe_write_user, task needs to be 8438b401f9eSYonghong Song * in a sound condition and kernel memory access be 8448b401f9eSYonghong Song * permitted in order to send signal to the current 8458b401f9eSYonghong Song * task. 8468b401f9eSYonghong Song */ 8478b401f9eSYonghong Song if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) 8488b401f9eSYonghong Song return -EPERM; 8498b401f9eSYonghong Song if (unlikely(!nmi_uaccess_okay())) 8508b401f9eSYonghong Song return -EPERM; 8518b401f9eSYonghong Song 8521bc7896eSYonghong Song if (irqs_disabled()) { 853e1afb702SYonghong Song /* Do an early check on signal validity. Otherwise, 854e1afb702SYonghong Song * the error is lost in deferred irq_work. 855e1afb702SYonghong Song */ 856e1afb702SYonghong Song if (unlikely(!valid_signal(sig))) 857e1afb702SYonghong Song return -EINVAL; 858e1afb702SYonghong Song 8598b401f9eSYonghong Song work = this_cpu_ptr(&send_signal_work); 8607a9f50a0SPeter Zijlstra if (irq_work_is_busy(&work->irq_work)) 8618b401f9eSYonghong Song return -EBUSY; 8628b401f9eSYonghong Song 8638b401f9eSYonghong Song /* Add the current task, which is the target of sending signal, 8648b401f9eSYonghong Song * to the irq_work. The current task may change when queued 8658b401f9eSYonghong Song * irq works get executed. 8668b401f9eSYonghong Song */ 8678b401f9eSYonghong Song work->task = current; 8688b401f9eSYonghong Song work->sig = sig; 8698482941fSYonghong Song work->type = type; 8708b401f9eSYonghong Song irq_work_queue(&work->irq_work); 8718b401f9eSYonghong Song return 0; 8728b401f9eSYonghong Song } 8738b401f9eSYonghong Song 8748482941fSYonghong Song return group_send_sig_info(sig, SEND_SIG_PRIV, current, type); 8758482941fSYonghong Song } 8768482941fSYonghong Song 8778482941fSYonghong Song BPF_CALL_1(bpf_send_signal, u32, sig) 8788482941fSYonghong Song { 8798482941fSYonghong Song return bpf_send_signal_common(sig, PIDTYPE_TGID); 8808b401f9eSYonghong Song } 8818b401f9eSYonghong Song 8828b401f9eSYonghong Song static const struct bpf_func_proto bpf_send_signal_proto = { 8838b401f9eSYonghong Song .func = bpf_send_signal, 8848b401f9eSYonghong Song .gpl_only = false, 8858b401f9eSYonghong Song .ret_type = RET_INTEGER, 8868b401f9eSYonghong Song .arg1_type = ARG_ANYTHING, 8878b401f9eSYonghong Song }; 8888b401f9eSYonghong Song 8898482941fSYonghong Song BPF_CALL_1(bpf_send_signal_thread, u32, sig) 8908482941fSYonghong Song { 8918482941fSYonghong Song return bpf_send_signal_common(sig, PIDTYPE_PID); 8928482941fSYonghong Song } 8938482941fSYonghong Song 8948482941fSYonghong Song static const struct bpf_func_proto bpf_send_signal_thread_proto = { 8958482941fSYonghong Song .func = bpf_send_signal_thread, 8968482941fSYonghong Song .gpl_only = false, 8978482941fSYonghong Song .ret_type = RET_INTEGER, 8988482941fSYonghong Song .arg1_type = ARG_ANYTHING, 8998482941fSYonghong Song }; 9008482941fSYonghong Song 9016e22ab9dSJiri Olsa BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) 9026e22ab9dSJiri Olsa { 9036e22ab9dSJiri Olsa long len; 9046e22ab9dSJiri Olsa char *p; 9056e22ab9dSJiri Olsa 9066e22ab9dSJiri Olsa if (!sz) 9076e22ab9dSJiri Olsa return 0; 9086e22ab9dSJiri Olsa 9096e22ab9dSJiri Olsa p = d_path(path, buf, sz); 9106e22ab9dSJiri Olsa if (IS_ERR(p)) { 9116e22ab9dSJiri Olsa len = PTR_ERR(p); 9126e22ab9dSJiri Olsa } else { 9136e22ab9dSJiri Olsa len = buf + sz - p; 9146e22ab9dSJiri Olsa memmove(buf, p, len); 9156e22ab9dSJiri Olsa } 9166e22ab9dSJiri Olsa 9176e22ab9dSJiri Olsa return len; 9186e22ab9dSJiri Olsa } 9196e22ab9dSJiri Olsa 9206e22ab9dSJiri Olsa BTF_SET_START(btf_allowlist_d_path) 921a8a71796SJiri Olsa #ifdef CONFIG_SECURITY 922a8a71796SJiri Olsa BTF_ID(func, security_file_permission) 923a8a71796SJiri Olsa BTF_ID(func, security_inode_getattr) 924a8a71796SJiri Olsa BTF_ID(func, security_file_open) 925a8a71796SJiri Olsa #endif 926a8a71796SJiri Olsa #ifdef CONFIG_SECURITY_PATH 927a8a71796SJiri Olsa BTF_ID(func, security_path_truncate) 928a8a71796SJiri Olsa #endif 9296e22ab9dSJiri Olsa BTF_ID(func, vfs_truncate) 9306e22ab9dSJiri Olsa BTF_ID(func, vfs_fallocate) 9316e22ab9dSJiri Olsa BTF_ID(func, dentry_open) 9326e22ab9dSJiri Olsa BTF_ID(func, vfs_getattr) 9336e22ab9dSJiri Olsa BTF_ID(func, filp_close) 9346e22ab9dSJiri Olsa BTF_SET_END(btf_allowlist_d_path) 9356e22ab9dSJiri Olsa 9366e22ab9dSJiri Olsa static bool bpf_d_path_allowed(const struct bpf_prog *prog) 9376e22ab9dSJiri Olsa { 9383d06f34aSSong Liu if (prog->type == BPF_PROG_TYPE_TRACING && 9393d06f34aSSong Liu prog->expected_attach_type == BPF_TRACE_ITER) 9403d06f34aSSong Liu return true; 9413d06f34aSSong Liu 9426f100640SKP Singh if (prog->type == BPF_PROG_TYPE_LSM) 9436f100640SKP Singh return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id); 9446f100640SKP Singh 9456f100640SKP Singh return btf_id_set_contains(&btf_allowlist_d_path, 9466f100640SKP Singh prog->aux->attach_btf_id); 9476e22ab9dSJiri Olsa } 9486e22ab9dSJiri Olsa 9499436ef6eSLorenz Bauer BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path) 9506e22ab9dSJiri Olsa 9516e22ab9dSJiri Olsa static const struct bpf_func_proto bpf_d_path_proto = { 9526e22ab9dSJiri Olsa .func = bpf_d_path, 9536e22ab9dSJiri Olsa .gpl_only = false, 9546e22ab9dSJiri Olsa .ret_type = RET_INTEGER, 9556e22ab9dSJiri Olsa .arg1_type = ARG_PTR_TO_BTF_ID, 9569436ef6eSLorenz Bauer .arg1_btf_id = &bpf_d_path_btf_ids[0], 9576e22ab9dSJiri Olsa .arg2_type = ARG_PTR_TO_MEM, 9586e22ab9dSJiri Olsa .arg3_type = ARG_CONST_SIZE_OR_ZERO, 9596e22ab9dSJiri Olsa .allowed = bpf_d_path_allowed, 9606e22ab9dSJiri Olsa }; 9616e22ab9dSJiri Olsa 962c4d0bfb4SAlan Maguire #define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \ 963c4d0bfb4SAlan Maguire BTF_F_PTR_RAW | BTF_F_ZERO) 964c4d0bfb4SAlan Maguire 965c4d0bfb4SAlan Maguire static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size, 966c4d0bfb4SAlan Maguire u64 flags, const struct btf **btf, 967c4d0bfb4SAlan Maguire s32 *btf_id) 968c4d0bfb4SAlan Maguire { 969c4d0bfb4SAlan Maguire const struct btf_type *t; 970c4d0bfb4SAlan Maguire 971c4d0bfb4SAlan Maguire if (unlikely(flags & ~(BTF_F_ALL))) 972c4d0bfb4SAlan Maguire return -EINVAL; 973c4d0bfb4SAlan Maguire 974c4d0bfb4SAlan Maguire if (btf_ptr_size != sizeof(struct btf_ptr)) 975c4d0bfb4SAlan Maguire return -EINVAL; 976c4d0bfb4SAlan Maguire 977c4d0bfb4SAlan Maguire *btf = bpf_get_btf_vmlinux(); 978c4d0bfb4SAlan Maguire 979c4d0bfb4SAlan Maguire if (IS_ERR_OR_NULL(*btf)) 980abbaa433SWang Qing return IS_ERR(*btf) ? PTR_ERR(*btf) : -EINVAL; 981c4d0bfb4SAlan Maguire 982c4d0bfb4SAlan Maguire if (ptr->type_id > 0) 983c4d0bfb4SAlan Maguire *btf_id = ptr->type_id; 984c4d0bfb4SAlan Maguire else 985c4d0bfb4SAlan Maguire return -EINVAL; 986c4d0bfb4SAlan Maguire 987c4d0bfb4SAlan Maguire if (*btf_id > 0) 988c4d0bfb4SAlan Maguire t = btf_type_by_id(*btf, *btf_id); 989c4d0bfb4SAlan Maguire if (*btf_id <= 0 || !t) 990c4d0bfb4SAlan Maguire return -ENOENT; 991c4d0bfb4SAlan Maguire 992c4d0bfb4SAlan Maguire return 0; 993c4d0bfb4SAlan Maguire } 994c4d0bfb4SAlan Maguire 995c4d0bfb4SAlan Maguire BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr, 996c4d0bfb4SAlan Maguire u32, btf_ptr_size, u64, flags) 997c4d0bfb4SAlan Maguire { 998c4d0bfb4SAlan Maguire const struct btf *btf; 999c4d0bfb4SAlan Maguire s32 btf_id; 1000c4d0bfb4SAlan Maguire int ret; 1001c4d0bfb4SAlan Maguire 1002c4d0bfb4SAlan Maguire ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id); 1003c4d0bfb4SAlan Maguire if (ret) 1004c4d0bfb4SAlan Maguire return ret; 1005c4d0bfb4SAlan Maguire 1006c4d0bfb4SAlan Maguire return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size, 1007c4d0bfb4SAlan Maguire flags); 1008c4d0bfb4SAlan Maguire } 1009c4d0bfb4SAlan Maguire 1010c4d0bfb4SAlan Maguire const struct bpf_func_proto bpf_snprintf_btf_proto = { 1011c4d0bfb4SAlan Maguire .func = bpf_snprintf_btf, 1012c4d0bfb4SAlan Maguire .gpl_only = false, 1013c4d0bfb4SAlan Maguire .ret_type = RET_INTEGER, 1014c4d0bfb4SAlan Maguire .arg1_type = ARG_PTR_TO_MEM, 1015c4d0bfb4SAlan Maguire .arg2_type = ARG_CONST_SIZE, 1016216e3cd2SHao Luo .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1017c4d0bfb4SAlan Maguire .arg4_type = ARG_CONST_SIZE, 1018c4d0bfb4SAlan Maguire .arg5_type = ARG_ANYTHING, 1019c4d0bfb4SAlan Maguire }; 1020c4d0bfb4SAlan Maguire 10219b99edcaSJiri Olsa BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) 10229b99edcaSJiri Olsa { 10239b99edcaSJiri Olsa /* This helper call is inlined by verifier. */ 1024f92c1e18SJiri Olsa return ((u64 *)ctx)[-2]; 10259b99edcaSJiri Olsa } 10269b99edcaSJiri Olsa 10279b99edcaSJiri Olsa static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { 10289b99edcaSJiri Olsa .func = bpf_get_func_ip_tracing, 10299b99edcaSJiri Olsa .gpl_only = true, 10309b99edcaSJiri Olsa .ret_type = RET_INTEGER, 10319b99edcaSJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 10329b99edcaSJiri Olsa }; 10339b99edcaSJiri Olsa 1034c09eb2e5SJiri Olsa #ifdef CONFIG_X86_KERNEL_IBT 1035c09eb2e5SJiri Olsa static unsigned long get_entry_ip(unsigned long fentry_ip) 1036c09eb2e5SJiri Olsa { 1037c09eb2e5SJiri Olsa u32 instr; 1038c09eb2e5SJiri Olsa 1039c09eb2e5SJiri Olsa /* Being extra safe in here in case entry ip is on the page-edge. */ 1040c09eb2e5SJiri Olsa if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1)) 1041c09eb2e5SJiri Olsa return fentry_ip; 1042c09eb2e5SJiri Olsa if (is_endbr(instr)) 1043c09eb2e5SJiri Olsa fentry_ip -= ENDBR_INSN_SIZE; 1044c09eb2e5SJiri Olsa return fentry_ip; 1045c09eb2e5SJiri Olsa } 1046c09eb2e5SJiri Olsa #else 1047c09eb2e5SJiri Olsa #define get_entry_ip(fentry_ip) fentry_ip 1048c09eb2e5SJiri Olsa #endif 1049c09eb2e5SJiri Olsa 10509ffd9f3fSJiri Olsa BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs) 10519ffd9f3fSJiri Olsa { 10529ffd9f3fSJiri Olsa struct kprobe *kp = kprobe_running(); 10539ffd9f3fSJiri Olsa 10540e253f7eSJiri Olsa if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY)) 10550e253f7eSJiri Olsa return 0; 10560e253f7eSJiri Olsa 10570e253f7eSJiri Olsa return get_entry_ip((uintptr_t)kp->addr); 10589ffd9f3fSJiri Olsa } 10599ffd9f3fSJiri Olsa 10609ffd9f3fSJiri Olsa static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = { 10619ffd9f3fSJiri Olsa .func = bpf_get_func_ip_kprobe, 10629ffd9f3fSJiri Olsa .gpl_only = true, 10639ffd9f3fSJiri Olsa .ret_type = RET_INTEGER, 10649ffd9f3fSJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 10659ffd9f3fSJiri Olsa }; 10669ffd9f3fSJiri Olsa 106742a57120SJiri Olsa BPF_CALL_1(bpf_get_func_ip_kprobe_multi, struct pt_regs *, regs) 106842a57120SJiri Olsa { 1069f7098690SJiri Olsa return bpf_kprobe_multi_entry_ip(current->bpf_ctx); 107042a57120SJiri Olsa } 107142a57120SJiri Olsa 107242a57120SJiri Olsa static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe_multi = { 107342a57120SJiri Olsa .func = bpf_get_func_ip_kprobe_multi, 107442a57120SJiri Olsa .gpl_only = false, 107542a57120SJiri Olsa .ret_type = RET_INTEGER, 107642a57120SJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 107742a57120SJiri Olsa }; 107842a57120SJiri Olsa 1079ca74823cSJiri Olsa BPF_CALL_1(bpf_get_attach_cookie_kprobe_multi, struct pt_regs *, regs) 1080ca74823cSJiri Olsa { 1081f7098690SJiri Olsa return bpf_kprobe_multi_cookie(current->bpf_ctx); 1082ca74823cSJiri Olsa } 1083ca74823cSJiri Olsa 1084ca74823cSJiri Olsa static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = { 1085ca74823cSJiri Olsa .func = bpf_get_attach_cookie_kprobe_multi, 1086ca74823cSJiri Olsa .gpl_only = false, 1087ca74823cSJiri Olsa .ret_type = RET_INTEGER, 1088ca74823cSJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 1089ca74823cSJiri Olsa }; 1090ca74823cSJiri Olsa 10917adfc6c9SAndrii Nakryiko BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx) 10927adfc6c9SAndrii Nakryiko { 10937adfc6c9SAndrii Nakryiko struct bpf_trace_run_ctx *run_ctx; 10947adfc6c9SAndrii Nakryiko 10957adfc6c9SAndrii Nakryiko run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 10967adfc6c9SAndrii Nakryiko return run_ctx->bpf_cookie; 10977adfc6c9SAndrii Nakryiko } 10987adfc6c9SAndrii Nakryiko 10997adfc6c9SAndrii Nakryiko static const struct bpf_func_proto bpf_get_attach_cookie_proto_trace = { 11007adfc6c9SAndrii Nakryiko .func = bpf_get_attach_cookie_trace, 11017adfc6c9SAndrii Nakryiko .gpl_only = false, 11027adfc6c9SAndrii Nakryiko .ret_type = RET_INTEGER, 11037adfc6c9SAndrii Nakryiko .arg1_type = ARG_PTR_TO_CTX, 11047adfc6c9SAndrii Nakryiko }; 11057adfc6c9SAndrii Nakryiko 11067adfc6c9SAndrii Nakryiko BPF_CALL_1(bpf_get_attach_cookie_pe, struct bpf_perf_event_data_kern *, ctx) 11077adfc6c9SAndrii Nakryiko { 11087adfc6c9SAndrii Nakryiko return ctx->event->bpf_cookie; 11097adfc6c9SAndrii Nakryiko } 11107adfc6c9SAndrii Nakryiko 11117adfc6c9SAndrii Nakryiko static const struct bpf_func_proto bpf_get_attach_cookie_proto_pe = { 11127adfc6c9SAndrii Nakryiko .func = bpf_get_attach_cookie_pe, 11137adfc6c9SAndrii Nakryiko .gpl_only = false, 11147adfc6c9SAndrii Nakryiko .ret_type = RET_INTEGER, 11157adfc6c9SAndrii Nakryiko .arg1_type = ARG_PTR_TO_CTX, 11167adfc6c9SAndrii Nakryiko }; 11177adfc6c9SAndrii Nakryiko 11182fcc8241SKui-Feng Lee BPF_CALL_1(bpf_get_attach_cookie_tracing, void *, ctx) 11192fcc8241SKui-Feng Lee { 11202fcc8241SKui-Feng Lee struct bpf_trace_run_ctx *run_ctx; 11212fcc8241SKui-Feng Lee 11222fcc8241SKui-Feng Lee run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx); 11232fcc8241SKui-Feng Lee return run_ctx->bpf_cookie; 11242fcc8241SKui-Feng Lee } 11252fcc8241SKui-Feng Lee 11262fcc8241SKui-Feng Lee static const struct bpf_func_proto bpf_get_attach_cookie_proto_tracing = { 11272fcc8241SKui-Feng Lee .func = bpf_get_attach_cookie_tracing, 11282fcc8241SKui-Feng Lee .gpl_only = false, 11292fcc8241SKui-Feng Lee .ret_type = RET_INTEGER, 11302fcc8241SKui-Feng Lee .arg1_type = ARG_PTR_TO_CTX, 11312fcc8241SKui-Feng Lee }; 11322fcc8241SKui-Feng Lee 1133856c02dbSSong Liu BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) 1134856c02dbSSong Liu { 1135856c02dbSSong Liu #ifndef CONFIG_X86 1136856c02dbSSong Liu return -ENOENT; 1137856c02dbSSong Liu #else 1138856c02dbSSong Liu static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1139856c02dbSSong Liu u32 entry_cnt = size / br_entry_size; 1140856c02dbSSong Liu 1141856c02dbSSong Liu entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); 1142856c02dbSSong Liu 1143856c02dbSSong Liu if (unlikely(flags)) 1144856c02dbSSong Liu return -EINVAL; 1145856c02dbSSong Liu 1146856c02dbSSong Liu if (!entry_cnt) 1147856c02dbSSong Liu return -ENOENT; 1148856c02dbSSong Liu 1149856c02dbSSong Liu return entry_cnt * br_entry_size; 1150856c02dbSSong Liu #endif 1151856c02dbSSong Liu } 1152856c02dbSSong Liu 1153856c02dbSSong Liu static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { 1154856c02dbSSong Liu .func = bpf_get_branch_snapshot, 1155856c02dbSSong Liu .gpl_only = true, 1156856c02dbSSong Liu .ret_type = RET_INTEGER, 1157856c02dbSSong Liu .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1158856c02dbSSong Liu .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1159856c02dbSSong Liu }; 1160856c02dbSSong Liu 1161f92c1e18SJiri Olsa BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value) 1162f92c1e18SJiri Olsa { 1163f92c1e18SJiri Olsa /* This helper call is inlined by verifier. */ 1164f92c1e18SJiri Olsa u64 nr_args = ((u64 *)ctx)[-1]; 1165f92c1e18SJiri Olsa 1166f92c1e18SJiri Olsa if ((u64) n >= nr_args) 1167f92c1e18SJiri Olsa return -EINVAL; 1168f92c1e18SJiri Olsa *value = ((u64 *)ctx)[n]; 1169f92c1e18SJiri Olsa return 0; 1170f92c1e18SJiri Olsa } 1171f92c1e18SJiri Olsa 1172f92c1e18SJiri Olsa static const struct bpf_func_proto bpf_get_func_arg_proto = { 1173f92c1e18SJiri Olsa .func = get_func_arg, 1174f92c1e18SJiri Olsa .ret_type = RET_INTEGER, 1175f92c1e18SJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 1176f92c1e18SJiri Olsa .arg2_type = ARG_ANYTHING, 1177f92c1e18SJiri Olsa .arg3_type = ARG_PTR_TO_LONG, 1178f92c1e18SJiri Olsa }; 1179f92c1e18SJiri Olsa 1180f92c1e18SJiri Olsa BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) 1181f92c1e18SJiri Olsa { 1182f92c1e18SJiri Olsa /* This helper call is inlined by verifier. */ 1183f92c1e18SJiri Olsa u64 nr_args = ((u64 *)ctx)[-1]; 1184f92c1e18SJiri Olsa 1185f92c1e18SJiri Olsa *value = ((u64 *)ctx)[nr_args]; 1186f92c1e18SJiri Olsa return 0; 1187f92c1e18SJiri Olsa } 1188f92c1e18SJiri Olsa 1189f92c1e18SJiri Olsa static const struct bpf_func_proto bpf_get_func_ret_proto = { 1190f92c1e18SJiri Olsa .func = get_func_ret, 1191f92c1e18SJiri Olsa .ret_type = RET_INTEGER, 1192f92c1e18SJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 1193f92c1e18SJiri Olsa .arg2_type = ARG_PTR_TO_LONG, 1194f92c1e18SJiri Olsa }; 1195f92c1e18SJiri Olsa 1196f92c1e18SJiri Olsa BPF_CALL_1(get_func_arg_cnt, void *, ctx) 1197f92c1e18SJiri Olsa { 1198f92c1e18SJiri Olsa /* This helper call is inlined by verifier. */ 1199f92c1e18SJiri Olsa return ((u64 *)ctx)[-1]; 1200f92c1e18SJiri Olsa } 1201f92c1e18SJiri Olsa 1202f92c1e18SJiri Olsa static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { 1203f92c1e18SJiri Olsa .func = get_func_arg_cnt, 1204f92c1e18SJiri Olsa .ret_type = RET_INTEGER, 1205f92c1e18SJiri Olsa .arg1_type = ARG_PTR_TO_CTX, 1206f92c1e18SJiri Olsa }; 1207f92c1e18SJiri Olsa 1208f3cf4134SRoberto Sassu #ifdef CONFIG_KEYS 1209f3cf4134SRoberto Sassu __diag_push(); 1210f3cf4134SRoberto Sassu __diag_ignore_all("-Wmissing-prototypes", 1211f3cf4134SRoberto Sassu "kfuncs which will be used in BPF programs"); 1212f3cf4134SRoberto Sassu 1213f3cf4134SRoberto Sassu /** 1214f3cf4134SRoberto Sassu * bpf_lookup_user_key - lookup a key by its serial 1215f3cf4134SRoberto Sassu * @serial: key handle serial number 1216f3cf4134SRoberto Sassu * @flags: lookup-specific flags 1217f3cf4134SRoberto Sassu * 1218f3cf4134SRoberto Sassu * Search a key with a given *serial* and the provided *flags*. 1219f3cf4134SRoberto Sassu * If found, increment the reference count of the key by one, and 1220f3cf4134SRoberto Sassu * return it in the bpf_key structure. 1221f3cf4134SRoberto Sassu * 1222f3cf4134SRoberto Sassu * The bpf_key structure must be passed to bpf_key_put() when done 1223f3cf4134SRoberto Sassu * with it, so that the key reference count is decremented and the 1224f3cf4134SRoberto Sassu * bpf_key structure is freed. 1225f3cf4134SRoberto Sassu * 1226f3cf4134SRoberto Sassu * Permission checks are deferred to the time the key is used by 1227f3cf4134SRoberto Sassu * one of the available key-specific kfuncs. 1228f3cf4134SRoberto Sassu * 1229f3cf4134SRoberto Sassu * Set *flags* with KEY_LOOKUP_CREATE, to attempt creating a requested 1230f3cf4134SRoberto Sassu * special keyring (e.g. session keyring), if it doesn't yet exist. 1231f3cf4134SRoberto Sassu * Set *flags* with KEY_LOOKUP_PARTIAL, to lookup a key without waiting 1232f3cf4134SRoberto Sassu * for the key construction, and to retrieve uninstantiated keys (keys 1233f3cf4134SRoberto Sassu * without data attached to them). 1234f3cf4134SRoberto Sassu * 1235f3cf4134SRoberto Sassu * Return: a bpf_key pointer with a valid key pointer if the key is found, a 1236f3cf4134SRoberto Sassu * NULL pointer otherwise. 1237f3cf4134SRoberto Sassu */ 1238f3cf4134SRoberto Sassu struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags) 1239f3cf4134SRoberto Sassu { 1240f3cf4134SRoberto Sassu key_ref_t key_ref; 1241f3cf4134SRoberto Sassu struct bpf_key *bkey; 1242f3cf4134SRoberto Sassu 1243f3cf4134SRoberto Sassu if (flags & ~KEY_LOOKUP_ALL) 1244f3cf4134SRoberto Sassu return NULL; 1245f3cf4134SRoberto Sassu 1246f3cf4134SRoberto Sassu /* 1247f3cf4134SRoberto Sassu * Permission check is deferred until the key is used, as the 1248f3cf4134SRoberto Sassu * intent of the caller is unknown here. 1249f3cf4134SRoberto Sassu */ 1250f3cf4134SRoberto Sassu key_ref = lookup_user_key(serial, flags, KEY_DEFER_PERM_CHECK); 1251f3cf4134SRoberto Sassu if (IS_ERR(key_ref)) 1252f3cf4134SRoberto Sassu return NULL; 1253f3cf4134SRoberto Sassu 1254f3cf4134SRoberto Sassu bkey = kmalloc(sizeof(*bkey), GFP_KERNEL); 1255f3cf4134SRoberto Sassu if (!bkey) { 1256f3cf4134SRoberto Sassu key_put(key_ref_to_ptr(key_ref)); 1257f3cf4134SRoberto Sassu return NULL; 1258f3cf4134SRoberto Sassu } 1259f3cf4134SRoberto Sassu 1260f3cf4134SRoberto Sassu bkey->key = key_ref_to_ptr(key_ref); 1261f3cf4134SRoberto Sassu bkey->has_ref = true; 1262f3cf4134SRoberto Sassu 1263f3cf4134SRoberto Sassu return bkey; 1264f3cf4134SRoberto Sassu } 1265f3cf4134SRoberto Sassu 1266f3cf4134SRoberto Sassu /** 1267f3cf4134SRoberto Sassu * bpf_lookup_system_key - lookup a key by a system-defined ID 1268f3cf4134SRoberto Sassu * @id: key ID 1269f3cf4134SRoberto Sassu * 1270f3cf4134SRoberto Sassu * Obtain a bpf_key structure with a key pointer set to the passed key ID. 1271f3cf4134SRoberto Sassu * The key pointer is marked as invalid, to prevent bpf_key_put() from 1272f3cf4134SRoberto Sassu * attempting to decrement the key reference count on that pointer. The key 1273f3cf4134SRoberto Sassu * pointer set in such way is currently understood only by 1274f3cf4134SRoberto Sassu * verify_pkcs7_signature(). 1275f3cf4134SRoberto Sassu * 1276f3cf4134SRoberto Sassu * Set *id* to one of the values defined in include/linux/verification.h: 1277f3cf4134SRoberto Sassu * 0 for the primary keyring (immutable keyring of system keys); 1278f3cf4134SRoberto Sassu * VERIFY_USE_SECONDARY_KEYRING for both the primary and secondary keyring 1279f3cf4134SRoberto Sassu * (where keys can be added only if they are vouched for by existing keys 1280f3cf4134SRoberto Sassu * in those keyrings); VERIFY_USE_PLATFORM_KEYRING for the platform 1281f3cf4134SRoberto Sassu * keyring (primarily used by the integrity subsystem to verify a kexec'ed 1282f3cf4134SRoberto Sassu * kerned image and, possibly, the initramfs signature). 1283f3cf4134SRoberto Sassu * 1284f3cf4134SRoberto Sassu * Return: a bpf_key pointer with an invalid key pointer set from the 1285f3cf4134SRoberto Sassu * pre-determined ID on success, a NULL pointer otherwise 1286f3cf4134SRoberto Sassu */ 1287f3cf4134SRoberto Sassu struct bpf_key *bpf_lookup_system_key(u64 id) 1288f3cf4134SRoberto Sassu { 1289f3cf4134SRoberto Sassu struct bpf_key *bkey; 1290f3cf4134SRoberto Sassu 1291f3cf4134SRoberto Sassu if (system_keyring_id_check(id) < 0) 1292f3cf4134SRoberto Sassu return NULL; 1293f3cf4134SRoberto Sassu 1294f3cf4134SRoberto Sassu bkey = kmalloc(sizeof(*bkey), GFP_ATOMIC); 1295f3cf4134SRoberto Sassu if (!bkey) 1296f3cf4134SRoberto Sassu return NULL; 1297f3cf4134SRoberto Sassu 1298f3cf4134SRoberto Sassu bkey->key = (struct key *)(unsigned long)id; 1299f3cf4134SRoberto Sassu bkey->has_ref = false; 1300f3cf4134SRoberto Sassu 1301f3cf4134SRoberto Sassu return bkey; 1302f3cf4134SRoberto Sassu } 1303f3cf4134SRoberto Sassu 1304f3cf4134SRoberto Sassu /** 1305f3cf4134SRoberto Sassu * bpf_key_put - decrement key reference count if key is valid and free bpf_key 1306f3cf4134SRoberto Sassu * @bkey: bpf_key structure 1307f3cf4134SRoberto Sassu * 1308f3cf4134SRoberto Sassu * Decrement the reference count of the key inside *bkey*, if the pointer 1309f3cf4134SRoberto Sassu * is valid, and free *bkey*. 1310f3cf4134SRoberto Sassu */ 1311f3cf4134SRoberto Sassu void bpf_key_put(struct bpf_key *bkey) 1312f3cf4134SRoberto Sassu { 1313f3cf4134SRoberto Sassu if (bkey->has_ref) 1314f3cf4134SRoberto Sassu key_put(bkey->key); 1315f3cf4134SRoberto Sassu 1316f3cf4134SRoberto Sassu kfree(bkey); 1317f3cf4134SRoberto Sassu } 1318f3cf4134SRoberto Sassu 1319865b0566SRoberto Sassu #ifdef CONFIG_SYSTEM_DATA_VERIFICATION 1320865b0566SRoberto Sassu /** 1321865b0566SRoberto Sassu * bpf_verify_pkcs7_signature - verify a PKCS#7 signature 1322865b0566SRoberto Sassu * @data_ptr: data to verify 1323865b0566SRoberto Sassu * @sig_ptr: signature of the data 1324865b0566SRoberto Sassu * @trusted_keyring: keyring with keys trusted for signature verification 1325865b0566SRoberto Sassu * 1326865b0566SRoberto Sassu * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* 1327865b0566SRoberto Sassu * with keys in a keyring referenced by *trusted_keyring*. 1328865b0566SRoberto Sassu * 1329865b0566SRoberto Sassu * Return: 0 on success, a negative value on error. 1330865b0566SRoberto Sassu */ 1331865b0566SRoberto Sassu int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, 1332865b0566SRoberto Sassu struct bpf_dynptr_kern *sig_ptr, 1333865b0566SRoberto Sassu struct bpf_key *trusted_keyring) 1334865b0566SRoberto Sassu { 1335865b0566SRoberto Sassu int ret; 1336865b0566SRoberto Sassu 1337865b0566SRoberto Sassu if (trusted_keyring->has_ref) { 1338865b0566SRoberto Sassu /* 1339865b0566SRoberto Sassu * Do the permission check deferred in bpf_lookup_user_key(). 1340865b0566SRoberto Sassu * See bpf_lookup_user_key() for more details. 1341865b0566SRoberto Sassu * 1342865b0566SRoberto Sassu * A call to key_task_permission() here would be redundant, as 1343865b0566SRoberto Sassu * it is already done by keyring_search() called by 1344865b0566SRoberto Sassu * find_asymmetric_key(). 1345865b0566SRoberto Sassu */ 1346865b0566SRoberto Sassu ret = key_validate(trusted_keyring->key); 1347865b0566SRoberto Sassu if (ret < 0) 1348865b0566SRoberto Sassu return ret; 1349865b0566SRoberto Sassu } 1350865b0566SRoberto Sassu 1351865b0566SRoberto Sassu return verify_pkcs7_signature(data_ptr->data, 1352865b0566SRoberto Sassu bpf_dynptr_get_size(data_ptr), 1353865b0566SRoberto Sassu sig_ptr->data, 1354865b0566SRoberto Sassu bpf_dynptr_get_size(sig_ptr), 1355865b0566SRoberto Sassu trusted_keyring->key, 1356865b0566SRoberto Sassu VERIFYING_UNSPECIFIED_SIGNATURE, NULL, 1357865b0566SRoberto Sassu NULL); 1358865b0566SRoberto Sassu } 1359865b0566SRoberto Sassu #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ 1360865b0566SRoberto Sassu 1361f3cf4134SRoberto Sassu __diag_pop(); 1362f3cf4134SRoberto Sassu 1363f3cf4134SRoberto Sassu BTF_SET8_START(key_sig_kfunc_set) 1364f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) 1365f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) 1366f3cf4134SRoberto Sassu BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) 1367865b0566SRoberto Sassu #ifdef CONFIG_SYSTEM_DATA_VERIFICATION 1368865b0566SRoberto Sassu BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) 1369865b0566SRoberto Sassu #endif 1370f3cf4134SRoberto Sassu BTF_SET8_END(key_sig_kfunc_set) 1371f3cf4134SRoberto Sassu 1372f3cf4134SRoberto Sassu static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { 1373f3cf4134SRoberto Sassu .owner = THIS_MODULE, 1374f3cf4134SRoberto Sassu .set = &key_sig_kfunc_set, 1375f3cf4134SRoberto Sassu }; 1376f3cf4134SRoberto Sassu 1377f3cf4134SRoberto Sassu static int __init bpf_key_sig_kfuncs_init(void) 1378f3cf4134SRoberto Sassu { 1379f3cf4134SRoberto Sassu return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, 1380f3cf4134SRoberto Sassu &bpf_key_sig_kfunc_set); 1381f3cf4134SRoberto Sassu } 1382f3cf4134SRoberto Sassu 1383f3cf4134SRoberto Sassu late_initcall(bpf_key_sig_kfuncs_init); 1384f3cf4134SRoberto Sassu #endif /* CONFIG_KEYS */ 1385f3cf4134SRoberto Sassu 13867adfc6c9SAndrii Nakryiko static const struct bpf_func_proto * 1387fc611f47SKP Singh bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 13882541517cSAlexei Starovoitov { 13892541517cSAlexei Starovoitov switch (func_id) { 13902541517cSAlexei Starovoitov case BPF_FUNC_map_lookup_elem: 13912541517cSAlexei Starovoitov return &bpf_map_lookup_elem_proto; 13922541517cSAlexei Starovoitov case BPF_FUNC_map_update_elem: 13932541517cSAlexei Starovoitov return &bpf_map_update_elem_proto; 13942541517cSAlexei Starovoitov case BPF_FUNC_map_delete_elem: 13952541517cSAlexei Starovoitov return &bpf_map_delete_elem_proto; 139602a8c817SAlban Crequy case BPF_FUNC_map_push_elem: 139702a8c817SAlban Crequy return &bpf_map_push_elem_proto; 139802a8c817SAlban Crequy case BPF_FUNC_map_pop_elem: 139902a8c817SAlban Crequy return &bpf_map_pop_elem_proto; 140002a8c817SAlban Crequy case BPF_FUNC_map_peek_elem: 140102a8c817SAlban Crequy return &bpf_map_peek_elem_proto; 140207343110SFeng Zhou case BPF_FUNC_map_lookup_percpu_elem: 140307343110SFeng Zhou return &bpf_map_lookup_percpu_elem_proto; 1404d9847d31SAlexei Starovoitov case BPF_FUNC_ktime_get_ns: 1405d9847d31SAlexei Starovoitov return &bpf_ktime_get_ns_proto; 140671d19214SMaciej Żenczykowski case BPF_FUNC_ktime_get_boot_ns: 140771d19214SMaciej Żenczykowski return &bpf_ktime_get_boot_ns_proto; 140804fd61abSAlexei Starovoitov case BPF_FUNC_tail_call: 140904fd61abSAlexei Starovoitov return &bpf_tail_call_proto; 1410ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_pid_tgid: 1411ffeedafbSAlexei Starovoitov return &bpf_get_current_pid_tgid_proto; 1412606274c5SAlexei Starovoitov case BPF_FUNC_get_current_task: 1413606274c5SAlexei Starovoitov return &bpf_get_current_task_proto; 14143ca1032aSKP Singh case BPF_FUNC_get_current_task_btf: 14153ca1032aSKP Singh return &bpf_get_current_task_btf_proto; 1416dd6e10fbSDaniel Xu case BPF_FUNC_task_pt_regs: 1417dd6e10fbSDaniel Xu return &bpf_task_pt_regs_proto; 1418ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_uid_gid: 1419ffeedafbSAlexei Starovoitov return &bpf_get_current_uid_gid_proto; 1420ffeedafbSAlexei Starovoitov case BPF_FUNC_get_current_comm: 1421ffeedafbSAlexei Starovoitov return &bpf_get_current_comm_proto; 14229c959c86SAlexei Starovoitov case BPF_FUNC_trace_printk: 14230756ea3eSAlexei Starovoitov return bpf_get_trace_printk_proto(); 1424ab1973d3SAlexei Starovoitov case BPF_FUNC_get_smp_processor_id: 1425ab1973d3SAlexei Starovoitov return &bpf_get_smp_processor_id_proto; 14262d0e30c3SDaniel Borkmann case BPF_FUNC_get_numa_node_id: 14272d0e30c3SDaniel Borkmann return &bpf_get_numa_node_id_proto; 142835578d79SKaixu Xia case BPF_FUNC_perf_event_read: 142935578d79SKaixu Xia return &bpf_perf_event_read_proto; 143060d20f91SSargun Dhillon case BPF_FUNC_current_task_under_cgroup: 143160d20f91SSargun Dhillon return &bpf_current_task_under_cgroup_proto; 14328937bd80SAlexei Starovoitov case BPF_FUNC_get_prandom_u32: 14338937bd80SAlexei Starovoitov return &bpf_get_prandom_u32_proto; 143451e1bb9eSDaniel Borkmann case BPF_FUNC_probe_write_user: 143551e1bb9eSDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_WRITE_USER) < 0 ? 143651e1bb9eSDaniel Borkmann NULL : bpf_get_probe_write_proto(); 14376ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_user: 14386ae08ae3SDaniel Borkmann return &bpf_probe_read_user_proto; 14396ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_kernel: 144071330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1441ff40e510SDaniel Borkmann NULL : &bpf_probe_read_kernel_proto; 14426ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_user_str: 14436ae08ae3SDaniel Borkmann return &bpf_probe_read_user_str_proto; 14446ae08ae3SDaniel Borkmann case BPF_FUNC_probe_read_kernel_str: 144571330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1446ff40e510SDaniel Borkmann NULL : &bpf_probe_read_kernel_str_proto; 14470ebeea8cSDaniel Borkmann #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 14480ebeea8cSDaniel Borkmann case BPF_FUNC_probe_read: 144971330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1450ff40e510SDaniel Borkmann NULL : &bpf_probe_read_compat_proto; 1451a5e8c070SGianluca Borello case BPF_FUNC_probe_read_str: 145271330842SDaniel Borkmann return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 1453ff40e510SDaniel Borkmann NULL : &bpf_probe_read_compat_str_proto; 14540ebeea8cSDaniel Borkmann #endif 145534ea38caSYonghong Song #ifdef CONFIG_CGROUPS 1456bf6fa2c8SYonghong Song case BPF_FUNC_get_current_cgroup_id: 1457bf6fa2c8SYonghong Song return &bpf_get_current_cgroup_id_proto; 145895b861a7SNamhyung Kim case BPF_FUNC_get_current_ancestor_cgroup_id: 145995b861a7SNamhyung Kim return &bpf_get_current_ancestor_cgroup_id_proto; 1460c4bcfb38SYonghong Song case BPF_FUNC_cgrp_storage_get: 1461c4bcfb38SYonghong Song return &bpf_cgrp_storage_get_proto; 1462c4bcfb38SYonghong Song case BPF_FUNC_cgrp_storage_delete: 1463c4bcfb38SYonghong Song return &bpf_cgrp_storage_delete_proto; 146434ea38caSYonghong Song #endif 14658b401f9eSYonghong Song case BPF_FUNC_send_signal: 14668b401f9eSYonghong Song return &bpf_send_signal_proto; 14678482941fSYonghong Song case BPF_FUNC_send_signal_thread: 14688482941fSYonghong Song return &bpf_send_signal_thread_proto; 1469b80b033bSSong Liu case BPF_FUNC_perf_event_read_value: 1470b80b033bSSong Liu return &bpf_perf_event_read_value_proto; 1471b4490c5cSCarlos Neira case BPF_FUNC_get_ns_current_pid_tgid: 1472b4490c5cSCarlos Neira return &bpf_get_ns_current_pid_tgid_proto; 1473457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_output: 1474457f4436SAndrii Nakryiko return &bpf_ringbuf_output_proto; 1475457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_reserve: 1476457f4436SAndrii Nakryiko return &bpf_ringbuf_reserve_proto; 1477457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_submit: 1478457f4436SAndrii Nakryiko return &bpf_ringbuf_submit_proto; 1479457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_discard: 1480457f4436SAndrii Nakryiko return &bpf_ringbuf_discard_proto; 1481457f4436SAndrii Nakryiko case BPF_FUNC_ringbuf_query: 1482457f4436SAndrii Nakryiko return &bpf_ringbuf_query_proto; 148372e2b2b6SYonghong Song case BPF_FUNC_jiffies64: 148472e2b2b6SYonghong Song return &bpf_jiffies64_proto; 1485fa28dcb8SSong Liu case BPF_FUNC_get_task_stack: 1486fa28dcb8SSong Liu return &bpf_get_task_stack_proto; 148707be4c4aSAlexei Starovoitov case BPF_FUNC_copy_from_user: 148807be4c4aSAlexei Starovoitov return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL; 1489376040e4SKenny Yu case BPF_FUNC_copy_from_user_task: 1490376040e4SKenny Yu return prog->aux->sleepable ? &bpf_copy_from_user_task_proto : NULL; 1491c4d0bfb4SAlan Maguire case BPF_FUNC_snprintf_btf: 1492c4d0bfb4SAlan Maguire return &bpf_snprintf_btf_proto; 1493b7906b70SAndrii Nakryiko case BPF_FUNC_per_cpu_ptr: 1494eaa6bcb7SHao Luo return &bpf_per_cpu_ptr_proto; 1495b7906b70SAndrii Nakryiko case BPF_FUNC_this_cpu_ptr: 149663d9b80dSHao Luo return &bpf_this_cpu_ptr_proto; 1497a10787e6SSong Liu case BPF_FUNC_task_storage_get: 14984279adb0SMartin KaFai Lau if (bpf_prog_check_recur(prog)) 14990593dd34SMartin KaFai Lau return &bpf_task_storage_get_recur_proto; 1500a10787e6SSong Liu return &bpf_task_storage_get_proto; 1501a10787e6SSong Liu case BPF_FUNC_task_storage_delete: 15028a7dac37SMartin KaFai Lau if (bpf_prog_check_recur(prog)) 15030593dd34SMartin KaFai Lau return &bpf_task_storage_delete_recur_proto; 1504a10787e6SSong Liu return &bpf_task_storage_delete_proto; 150569c087baSYonghong Song case BPF_FUNC_for_each_map_elem: 150669c087baSYonghong Song return &bpf_for_each_map_elem_proto; 15077b15523aSFlorent Revest case BPF_FUNC_snprintf: 15087b15523aSFlorent Revest return &bpf_snprintf_proto; 15099b99edcaSJiri Olsa case BPF_FUNC_get_func_ip: 15109b99edcaSJiri Olsa return &bpf_get_func_ip_proto_tracing; 1511856c02dbSSong Liu case BPF_FUNC_get_branch_snapshot: 1512856c02dbSSong Liu return &bpf_get_branch_snapshot_proto; 15137c7e3d31SSong Liu case BPF_FUNC_find_vma: 15147c7e3d31SSong Liu return &bpf_find_vma_proto; 151510aceb62SDave Marchevsky case BPF_FUNC_trace_vprintk: 151610aceb62SDave Marchevsky return bpf_get_trace_vprintk_proto(); 15179fd82b61SAlexei Starovoitov default: 1518b00628b1SAlexei Starovoitov return bpf_base_func_proto(func_id); 15199fd82b61SAlexei Starovoitov } 15209fd82b61SAlexei Starovoitov } 15219fd82b61SAlexei Starovoitov 15225e43f899SAndrey Ignatov static const struct bpf_func_proto * 15235e43f899SAndrey Ignatov kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 15249fd82b61SAlexei Starovoitov { 15259fd82b61SAlexei Starovoitov switch (func_id) { 1526a43eec30SAlexei Starovoitov case BPF_FUNC_perf_event_output: 1527a43eec30SAlexei Starovoitov return &bpf_perf_event_output_proto; 1528d5a3b1f6SAlexei Starovoitov case BPF_FUNC_get_stackid: 1529d5a3b1f6SAlexei Starovoitov return &bpf_get_stackid_proto; 1530c195651eSYonghong Song case BPF_FUNC_get_stack: 1531c195651eSYonghong Song return &bpf_get_stack_proto; 15329802d865SJosef Bacik #ifdef CONFIG_BPF_KPROBE_OVERRIDE 15339802d865SJosef Bacik case BPF_FUNC_override_return: 15349802d865SJosef Bacik return &bpf_override_return_proto; 15359802d865SJosef Bacik #endif 15369ffd9f3fSJiri Olsa case BPF_FUNC_get_func_ip: 153742a57120SJiri Olsa return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 153842a57120SJiri Olsa &bpf_get_func_ip_proto_kprobe_multi : 153942a57120SJiri Olsa &bpf_get_func_ip_proto_kprobe; 15407adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 1541ca74823cSJiri Olsa return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ? 1542ca74823cSJiri Olsa &bpf_get_attach_cookie_proto_kmulti : 1543ca74823cSJiri Olsa &bpf_get_attach_cookie_proto_trace; 15442541517cSAlexei Starovoitov default: 1545fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 15462541517cSAlexei Starovoitov } 15472541517cSAlexei Starovoitov } 15482541517cSAlexei Starovoitov 15492541517cSAlexei Starovoitov /* bpf+kprobe programs can access fields of 'struct pt_regs' */ 155019de99f7SAlexei Starovoitov static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 15515e43f899SAndrey Ignatov const struct bpf_prog *prog, 155223994631SYonghong Song struct bpf_insn_access_aux *info) 15532541517cSAlexei Starovoitov { 15542541517cSAlexei Starovoitov if (off < 0 || off >= sizeof(struct pt_regs)) 15552541517cSAlexei Starovoitov return false; 15562541517cSAlexei Starovoitov if (type != BPF_READ) 15572541517cSAlexei Starovoitov return false; 15582541517cSAlexei Starovoitov if (off % size != 0) 15592541517cSAlexei Starovoitov return false; 15602d071c64SDaniel Borkmann /* 15612d071c64SDaniel Borkmann * Assertion for 32 bit to make sure last 8 byte access 15622d071c64SDaniel Borkmann * (BPF_DW) to the last 4 byte member is disallowed. 15632d071c64SDaniel Borkmann */ 15642d071c64SDaniel Borkmann if (off + size > sizeof(struct pt_regs)) 15652d071c64SDaniel Borkmann return false; 15662d071c64SDaniel Borkmann 15672541517cSAlexei Starovoitov return true; 15682541517cSAlexei Starovoitov } 15692541517cSAlexei Starovoitov 15707de16e3aSJakub Kicinski const struct bpf_verifier_ops kprobe_verifier_ops = { 15712541517cSAlexei Starovoitov .get_func_proto = kprobe_prog_func_proto, 15722541517cSAlexei Starovoitov .is_valid_access = kprobe_prog_is_valid_access, 15732541517cSAlexei Starovoitov }; 15742541517cSAlexei Starovoitov 15757de16e3aSJakub Kicinski const struct bpf_prog_ops kprobe_prog_ops = { 15767de16e3aSJakub Kicinski }; 15777de16e3aSJakub Kicinski 1578f3694e00SDaniel Borkmann BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, 1579f3694e00SDaniel Borkmann u64, flags, void *, data, u64, size) 15809940d67cSAlexei Starovoitov { 1581f3694e00SDaniel Borkmann struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1582f3694e00SDaniel Borkmann 15839940d67cSAlexei Starovoitov /* 15849940d67cSAlexei Starovoitov * r1 points to perf tracepoint buffer where first 8 bytes are hidden 15859940d67cSAlexei Starovoitov * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it 1586f3694e00SDaniel Borkmann * from there and call the same bpf_perf_event_output() helper inline. 15879940d67cSAlexei Starovoitov */ 1588f3694e00SDaniel Borkmann return ____bpf_perf_event_output(regs, map, flags, data, size); 15899940d67cSAlexei Starovoitov } 15909940d67cSAlexei Starovoitov 15919940d67cSAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { 15929940d67cSAlexei Starovoitov .func = bpf_perf_event_output_tp, 15939940d67cSAlexei Starovoitov .gpl_only = true, 15949940d67cSAlexei Starovoitov .ret_type = RET_INTEGER, 15959940d67cSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 15969940d67cSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 15979940d67cSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1598216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1599a60dd35dSGianluca Borello .arg5_type = ARG_CONST_SIZE_OR_ZERO, 16009940d67cSAlexei Starovoitov }; 16019940d67cSAlexei Starovoitov 1602f3694e00SDaniel Borkmann BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, 1603f3694e00SDaniel Borkmann u64, flags) 16049940d67cSAlexei Starovoitov { 1605f3694e00SDaniel Borkmann struct pt_regs *regs = *(struct pt_regs **)tp_buff; 16069940d67cSAlexei Starovoitov 1607f3694e00SDaniel Borkmann /* 1608f3694e00SDaniel Borkmann * Same comment as in bpf_perf_event_output_tp(), only that this time 1609f3694e00SDaniel Borkmann * the other helper's function body cannot be inlined due to being 1610f3694e00SDaniel Borkmann * external, thus we need to call raw helper function. 1611f3694e00SDaniel Borkmann */ 1612f3694e00SDaniel Borkmann return bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1613f3694e00SDaniel Borkmann flags, 0, 0); 16149940d67cSAlexei Starovoitov } 16159940d67cSAlexei Starovoitov 16169940d67cSAlexei Starovoitov static const struct bpf_func_proto bpf_get_stackid_proto_tp = { 16179940d67cSAlexei Starovoitov .func = bpf_get_stackid_tp, 16189940d67cSAlexei Starovoitov .gpl_only = true, 16199940d67cSAlexei Starovoitov .ret_type = RET_INTEGER, 16209940d67cSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 16219940d67cSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 16229940d67cSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 16239940d67cSAlexei Starovoitov }; 16249940d67cSAlexei Starovoitov 1625c195651eSYonghong Song BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size, 1626c195651eSYonghong Song u64, flags) 1627c195651eSYonghong Song { 1628c195651eSYonghong Song struct pt_regs *regs = *(struct pt_regs **)tp_buff; 1629c195651eSYonghong Song 1630c195651eSYonghong Song return bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1631c195651eSYonghong Song (unsigned long) size, flags, 0); 1632c195651eSYonghong Song } 1633c195651eSYonghong Song 1634c195651eSYonghong Song static const struct bpf_func_proto bpf_get_stack_proto_tp = { 1635c195651eSYonghong Song .func = bpf_get_stack_tp, 1636c195651eSYonghong Song .gpl_only = true, 1637c195651eSYonghong Song .ret_type = RET_INTEGER, 1638c195651eSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1639c195651eSYonghong Song .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1640c195651eSYonghong Song .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1641c195651eSYonghong Song .arg4_type = ARG_ANYTHING, 1642c195651eSYonghong Song }; 1643c195651eSYonghong Song 16445e43f899SAndrey Ignatov static const struct bpf_func_proto * 16455e43f899SAndrey Ignatov tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 16469fd82b61SAlexei Starovoitov { 16479fd82b61SAlexei Starovoitov switch (func_id) { 16489fd82b61SAlexei Starovoitov case BPF_FUNC_perf_event_output: 16499940d67cSAlexei Starovoitov return &bpf_perf_event_output_proto_tp; 16509fd82b61SAlexei Starovoitov case BPF_FUNC_get_stackid: 16519940d67cSAlexei Starovoitov return &bpf_get_stackid_proto_tp; 1652c195651eSYonghong Song case BPF_FUNC_get_stack: 1653c195651eSYonghong Song return &bpf_get_stack_proto_tp; 16547adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 16557adfc6c9SAndrii Nakryiko return &bpf_get_attach_cookie_proto_trace; 16569fd82b61SAlexei Starovoitov default: 1657fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 16589fd82b61SAlexei Starovoitov } 16599fd82b61SAlexei Starovoitov } 16609fd82b61SAlexei Starovoitov 166119de99f7SAlexei Starovoitov static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, 16625e43f899SAndrey Ignatov const struct bpf_prog *prog, 166323994631SYonghong Song struct bpf_insn_access_aux *info) 16649fd82b61SAlexei Starovoitov { 16659fd82b61SAlexei Starovoitov if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) 16669fd82b61SAlexei Starovoitov return false; 16679fd82b61SAlexei Starovoitov if (type != BPF_READ) 16689fd82b61SAlexei Starovoitov return false; 16699fd82b61SAlexei Starovoitov if (off % size != 0) 16709fd82b61SAlexei Starovoitov return false; 16712d071c64SDaniel Borkmann 16722d071c64SDaniel Borkmann BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64)); 16739fd82b61SAlexei Starovoitov return true; 16749fd82b61SAlexei Starovoitov } 16759fd82b61SAlexei Starovoitov 16767de16e3aSJakub Kicinski const struct bpf_verifier_ops tracepoint_verifier_ops = { 16779fd82b61SAlexei Starovoitov .get_func_proto = tp_prog_func_proto, 16789fd82b61SAlexei Starovoitov .is_valid_access = tp_prog_is_valid_access, 16799fd82b61SAlexei Starovoitov }; 16809fd82b61SAlexei Starovoitov 16817de16e3aSJakub Kicinski const struct bpf_prog_ops tracepoint_prog_ops = { 16827de16e3aSJakub Kicinski }; 16837de16e3aSJakub Kicinski 1684f005afedSYonghong Song BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx, 1685f005afedSYonghong Song struct bpf_perf_event_value *, buf, u32, size) 1686f005afedSYonghong Song { 1687f005afedSYonghong Song int err = -EINVAL; 1688f005afedSYonghong Song 1689f005afedSYonghong Song if (unlikely(size != sizeof(struct bpf_perf_event_value))) 1690f005afedSYonghong Song goto clear; 1691f005afedSYonghong Song err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled, 1692f005afedSYonghong Song &buf->running); 1693f005afedSYonghong Song if (unlikely(err)) 1694f005afedSYonghong Song goto clear; 1695f005afedSYonghong Song return 0; 1696f005afedSYonghong Song clear: 1697f005afedSYonghong Song memset(buf, 0, size); 1698f005afedSYonghong Song return err; 1699f005afedSYonghong Song } 1700f005afedSYonghong Song 1701f005afedSYonghong Song static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { 1702f005afedSYonghong Song .func = bpf_perf_prog_read_value, 1703f005afedSYonghong Song .gpl_only = true, 1704f005afedSYonghong Song .ret_type = RET_INTEGER, 1705f005afedSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1706f005afedSYonghong Song .arg2_type = ARG_PTR_TO_UNINIT_MEM, 1707f005afedSYonghong Song .arg3_type = ARG_CONST_SIZE, 1708f005afedSYonghong Song }; 1709f005afedSYonghong Song 1710fff7b643SDaniel Xu BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, 1711fff7b643SDaniel Xu void *, buf, u32, size, u64, flags) 1712fff7b643SDaniel Xu { 1713fff7b643SDaniel Xu static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1714fff7b643SDaniel Xu struct perf_branch_stack *br_stack = ctx->data->br_stack; 1715fff7b643SDaniel Xu u32 to_copy; 1716fff7b643SDaniel Xu 1717fff7b643SDaniel Xu if (unlikely(flags & ~BPF_F_GET_BRANCH_RECORDS_SIZE)) 1718fff7b643SDaniel Xu return -EINVAL; 1719fff7b643SDaniel Xu 1720cce6a2d7SJiri Olsa if (unlikely(!(ctx->data->sample_flags & PERF_SAMPLE_BRANCH_STACK))) 1721cce6a2d7SJiri Olsa return -ENOENT; 1722cce6a2d7SJiri Olsa 1723fff7b643SDaniel Xu if (unlikely(!br_stack)) 1724db52f572SKajol Jain return -ENOENT; 1725fff7b643SDaniel Xu 1726fff7b643SDaniel Xu if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) 1727fff7b643SDaniel Xu return br_stack->nr * br_entry_size; 1728fff7b643SDaniel Xu 1729fff7b643SDaniel Xu if (!buf || (size % br_entry_size != 0)) 1730fff7b643SDaniel Xu return -EINVAL; 1731fff7b643SDaniel Xu 1732fff7b643SDaniel Xu to_copy = min_t(u32, br_stack->nr * br_entry_size, size); 1733fff7b643SDaniel Xu memcpy(buf, br_stack->entries, to_copy); 1734fff7b643SDaniel Xu 1735fff7b643SDaniel Xu return to_copy; 1736fff7b643SDaniel Xu } 1737fff7b643SDaniel Xu 1738fff7b643SDaniel Xu static const struct bpf_func_proto bpf_read_branch_records_proto = { 1739fff7b643SDaniel Xu .func = bpf_read_branch_records, 1740fff7b643SDaniel Xu .gpl_only = true, 1741fff7b643SDaniel Xu .ret_type = RET_INTEGER, 1742fff7b643SDaniel Xu .arg1_type = ARG_PTR_TO_CTX, 1743fff7b643SDaniel Xu .arg2_type = ARG_PTR_TO_MEM_OR_NULL, 1744fff7b643SDaniel Xu .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1745fff7b643SDaniel Xu .arg4_type = ARG_ANYTHING, 1746fff7b643SDaniel Xu }; 1747fff7b643SDaniel Xu 17485e43f899SAndrey Ignatov static const struct bpf_func_proto * 17495e43f899SAndrey Ignatov pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1750f005afedSYonghong Song { 1751f005afedSYonghong Song switch (func_id) { 1752f005afedSYonghong Song case BPF_FUNC_perf_event_output: 1753f005afedSYonghong Song return &bpf_perf_event_output_proto_tp; 1754f005afedSYonghong Song case BPF_FUNC_get_stackid: 17557b04d6d6SSong Liu return &bpf_get_stackid_proto_pe; 1756c195651eSYonghong Song case BPF_FUNC_get_stack: 17577b04d6d6SSong Liu return &bpf_get_stack_proto_pe; 1758f005afedSYonghong Song case BPF_FUNC_perf_prog_read_value: 1759f005afedSYonghong Song return &bpf_perf_prog_read_value_proto; 1760fff7b643SDaniel Xu case BPF_FUNC_read_branch_records: 1761fff7b643SDaniel Xu return &bpf_read_branch_records_proto; 17627adfc6c9SAndrii Nakryiko case BPF_FUNC_get_attach_cookie: 17637adfc6c9SAndrii Nakryiko return &bpf_get_attach_cookie_proto_pe; 1764f005afedSYonghong Song default: 1765fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 1766f005afedSYonghong Song } 1767f005afedSYonghong Song } 1768f005afedSYonghong Song 1769c4f6699dSAlexei Starovoitov /* 1770c4f6699dSAlexei Starovoitov * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp 1771c4f6699dSAlexei Starovoitov * to avoid potential recursive reuse issue when/if tracepoints are added 17729594dc3cSMatt Mullins * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack. 17739594dc3cSMatt Mullins * 17749594dc3cSMatt Mullins * Since raw tracepoints run despite bpf_prog_active, support concurrent usage 17759594dc3cSMatt Mullins * in normal, irq, and nmi context. 1776c4f6699dSAlexei Starovoitov */ 17779594dc3cSMatt Mullins struct bpf_raw_tp_regs { 17789594dc3cSMatt Mullins struct pt_regs regs[3]; 17799594dc3cSMatt Mullins }; 17809594dc3cSMatt Mullins static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs); 17819594dc3cSMatt Mullins static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level); 17829594dc3cSMatt Mullins static struct pt_regs *get_bpf_raw_tp_regs(void) 17839594dc3cSMatt Mullins { 17849594dc3cSMatt Mullins struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs); 17859594dc3cSMatt Mullins int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level); 17869594dc3cSMatt Mullins 17879594dc3cSMatt Mullins if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) { 17889594dc3cSMatt Mullins this_cpu_dec(bpf_raw_tp_nest_level); 17899594dc3cSMatt Mullins return ERR_PTR(-EBUSY); 17909594dc3cSMatt Mullins } 17919594dc3cSMatt Mullins 17929594dc3cSMatt Mullins return &tp_regs->regs[nest_level - 1]; 17939594dc3cSMatt Mullins } 17949594dc3cSMatt Mullins 17959594dc3cSMatt Mullins static void put_bpf_raw_tp_regs(void) 17969594dc3cSMatt Mullins { 17979594dc3cSMatt Mullins this_cpu_dec(bpf_raw_tp_nest_level); 17989594dc3cSMatt Mullins } 17999594dc3cSMatt Mullins 1800c4f6699dSAlexei Starovoitov BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args, 1801c4f6699dSAlexei Starovoitov struct bpf_map *, map, u64, flags, void *, data, u64, size) 1802c4f6699dSAlexei Starovoitov { 18039594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 18049594dc3cSMatt Mullins int ret; 18059594dc3cSMatt Mullins 18069594dc3cSMatt Mullins if (IS_ERR(regs)) 18079594dc3cSMatt Mullins return PTR_ERR(regs); 1808c4f6699dSAlexei Starovoitov 1809c4f6699dSAlexei Starovoitov perf_fetch_caller_regs(regs); 18109594dc3cSMatt Mullins ret = ____bpf_perf_event_output(regs, map, flags, data, size); 18119594dc3cSMatt Mullins 18129594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 18139594dc3cSMatt Mullins return ret; 1814c4f6699dSAlexei Starovoitov } 1815c4f6699dSAlexei Starovoitov 1816c4f6699dSAlexei Starovoitov static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { 1817c4f6699dSAlexei Starovoitov .func = bpf_perf_event_output_raw_tp, 1818c4f6699dSAlexei Starovoitov .gpl_only = true, 1819c4f6699dSAlexei Starovoitov .ret_type = RET_INTEGER, 1820c4f6699dSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 1821c4f6699dSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 1822c4f6699dSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1823216e3cd2SHao Luo .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1824c4f6699dSAlexei Starovoitov .arg5_type = ARG_CONST_SIZE_OR_ZERO, 1825c4f6699dSAlexei Starovoitov }; 1826c4f6699dSAlexei Starovoitov 1827a7658e1aSAlexei Starovoitov extern const struct bpf_func_proto bpf_skb_output_proto; 1828d831ee84SEelco Chaudron extern const struct bpf_func_proto bpf_xdp_output_proto; 1829d9917302SEelco Chaudron extern const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto; 1830a7658e1aSAlexei Starovoitov 1831c4f6699dSAlexei Starovoitov BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args, 1832c4f6699dSAlexei Starovoitov struct bpf_map *, map, u64, flags) 1833c4f6699dSAlexei Starovoitov { 18349594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 18359594dc3cSMatt Mullins int ret; 18369594dc3cSMatt Mullins 18379594dc3cSMatt Mullins if (IS_ERR(regs)) 18389594dc3cSMatt Mullins return PTR_ERR(regs); 1839c4f6699dSAlexei Starovoitov 1840c4f6699dSAlexei Starovoitov perf_fetch_caller_regs(regs); 1841c4f6699dSAlexei Starovoitov /* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */ 18429594dc3cSMatt Mullins ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, 1843c4f6699dSAlexei Starovoitov flags, 0, 0); 18449594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 18459594dc3cSMatt Mullins return ret; 1846c4f6699dSAlexei Starovoitov } 1847c4f6699dSAlexei Starovoitov 1848c4f6699dSAlexei Starovoitov static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = { 1849c4f6699dSAlexei Starovoitov .func = bpf_get_stackid_raw_tp, 1850c4f6699dSAlexei Starovoitov .gpl_only = true, 1851c4f6699dSAlexei Starovoitov .ret_type = RET_INTEGER, 1852c4f6699dSAlexei Starovoitov .arg1_type = ARG_PTR_TO_CTX, 1853c4f6699dSAlexei Starovoitov .arg2_type = ARG_CONST_MAP_PTR, 1854c4f6699dSAlexei Starovoitov .arg3_type = ARG_ANYTHING, 1855c4f6699dSAlexei Starovoitov }; 1856c4f6699dSAlexei Starovoitov 1857c195651eSYonghong Song BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args, 1858c195651eSYonghong Song void *, buf, u32, size, u64, flags) 1859c195651eSYonghong Song { 18609594dc3cSMatt Mullins struct pt_regs *regs = get_bpf_raw_tp_regs(); 18619594dc3cSMatt Mullins int ret; 18629594dc3cSMatt Mullins 18639594dc3cSMatt Mullins if (IS_ERR(regs)) 18649594dc3cSMatt Mullins return PTR_ERR(regs); 1865c195651eSYonghong Song 1866c195651eSYonghong Song perf_fetch_caller_regs(regs); 18679594dc3cSMatt Mullins ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf, 1868c195651eSYonghong Song (unsigned long) size, flags, 0); 18699594dc3cSMatt Mullins put_bpf_raw_tp_regs(); 18709594dc3cSMatt Mullins return ret; 1871c195651eSYonghong Song } 1872c195651eSYonghong Song 1873c195651eSYonghong Song static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { 1874c195651eSYonghong Song .func = bpf_get_stack_raw_tp, 1875c195651eSYonghong Song .gpl_only = true, 1876c195651eSYonghong Song .ret_type = RET_INTEGER, 1877c195651eSYonghong Song .arg1_type = ARG_PTR_TO_CTX, 1878216e3cd2SHao Luo .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, 1879c195651eSYonghong Song .arg3_type = ARG_CONST_SIZE_OR_ZERO, 1880c195651eSYonghong Song .arg4_type = ARG_ANYTHING, 1881c195651eSYonghong Song }; 1882c195651eSYonghong Song 18835e43f899SAndrey Ignatov static const struct bpf_func_proto * 18845e43f899SAndrey Ignatov raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1885c4f6699dSAlexei Starovoitov { 1886c4f6699dSAlexei Starovoitov switch (func_id) { 1887c4f6699dSAlexei Starovoitov case BPF_FUNC_perf_event_output: 1888c4f6699dSAlexei Starovoitov return &bpf_perf_event_output_proto_raw_tp; 1889c4f6699dSAlexei Starovoitov case BPF_FUNC_get_stackid: 1890c4f6699dSAlexei Starovoitov return &bpf_get_stackid_proto_raw_tp; 1891c195651eSYonghong Song case BPF_FUNC_get_stack: 1892c195651eSYonghong Song return &bpf_get_stack_proto_raw_tp; 1893c4f6699dSAlexei Starovoitov default: 1894fc611f47SKP Singh return bpf_tracing_func_proto(func_id, prog); 1895c4f6699dSAlexei Starovoitov } 1896c4f6699dSAlexei Starovoitov } 1897c4f6699dSAlexei Starovoitov 1898958a3f2dSJiri Olsa const struct bpf_func_proto * 1899f1b9509cSAlexei Starovoitov tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1900f1b9509cSAlexei Starovoitov { 19013cee6fb8SMartin KaFai Lau const struct bpf_func_proto *fn; 19023cee6fb8SMartin KaFai Lau 1903f1b9509cSAlexei Starovoitov switch (func_id) { 1904f1b9509cSAlexei Starovoitov #ifdef CONFIG_NET 1905f1b9509cSAlexei Starovoitov case BPF_FUNC_skb_output: 1906f1b9509cSAlexei Starovoitov return &bpf_skb_output_proto; 1907d831ee84SEelco Chaudron case BPF_FUNC_xdp_output: 1908d831ee84SEelco Chaudron return &bpf_xdp_output_proto; 1909af7ec138SYonghong Song case BPF_FUNC_skc_to_tcp6_sock: 1910af7ec138SYonghong Song return &bpf_skc_to_tcp6_sock_proto; 1911478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_sock: 1912478cfbdfSYonghong Song return &bpf_skc_to_tcp_sock_proto; 1913478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_timewait_sock: 1914478cfbdfSYonghong Song return &bpf_skc_to_tcp_timewait_sock_proto; 1915478cfbdfSYonghong Song case BPF_FUNC_skc_to_tcp_request_sock: 1916478cfbdfSYonghong Song return &bpf_skc_to_tcp_request_sock_proto; 19170d4fad3eSYonghong Song case BPF_FUNC_skc_to_udp6_sock: 19180d4fad3eSYonghong Song return &bpf_skc_to_udp6_sock_proto; 19199eeb3aa3SHengqi Chen case BPF_FUNC_skc_to_unix_sock: 19209eeb3aa3SHengqi Chen return &bpf_skc_to_unix_sock_proto; 19213bc253c2SGeliang Tang case BPF_FUNC_skc_to_mptcp_sock: 19223bc253c2SGeliang Tang return &bpf_skc_to_mptcp_sock_proto; 19238e4597c6SMartin KaFai Lau case BPF_FUNC_sk_storage_get: 19248e4597c6SMartin KaFai Lau return &bpf_sk_storage_get_tracing_proto; 19258e4597c6SMartin KaFai Lau case BPF_FUNC_sk_storage_delete: 19268e4597c6SMartin KaFai Lau return &bpf_sk_storage_delete_tracing_proto; 1927b60da495SFlorent Revest case BPF_FUNC_sock_from_file: 1928b60da495SFlorent Revest return &bpf_sock_from_file_proto; 1929c5dbb89fSFlorent Revest case BPF_FUNC_get_socket_cookie: 1930c5dbb89fSFlorent Revest return &bpf_get_socket_ptr_cookie_proto; 1931d9917302SEelco Chaudron case BPF_FUNC_xdp_get_buff_len: 1932d9917302SEelco Chaudron return &bpf_xdp_get_buff_len_trace_proto; 1933f1b9509cSAlexei Starovoitov #endif 1934492e639fSYonghong Song case BPF_FUNC_seq_printf: 1935492e639fSYonghong Song return prog->expected_attach_type == BPF_TRACE_ITER ? 1936492e639fSYonghong Song &bpf_seq_printf_proto : 1937492e639fSYonghong Song NULL; 1938492e639fSYonghong Song case BPF_FUNC_seq_write: 1939492e639fSYonghong Song return prog->expected_attach_type == BPF_TRACE_ITER ? 1940492e639fSYonghong Song &bpf_seq_write_proto : 1941492e639fSYonghong Song NULL; 1942eb411377SAlan Maguire case BPF_FUNC_seq_printf_btf: 1943eb411377SAlan Maguire return prog->expected_attach_type == BPF_TRACE_ITER ? 1944eb411377SAlan Maguire &bpf_seq_printf_btf_proto : 1945eb411377SAlan Maguire NULL; 19466e22ab9dSJiri Olsa case BPF_FUNC_d_path: 19476e22ab9dSJiri Olsa return &bpf_d_path_proto; 1948f92c1e18SJiri Olsa case BPF_FUNC_get_func_arg: 1949f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL; 1950f92c1e18SJiri Olsa case BPF_FUNC_get_func_ret: 1951f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL; 1952f92c1e18SJiri Olsa case BPF_FUNC_get_func_arg_cnt: 1953f92c1e18SJiri Olsa return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; 19542fcc8241SKui-Feng Lee case BPF_FUNC_get_attach_cookie: 19552fcc8241SKui-Feng Lee return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; 1956f1b9509cSAlexei Starovoitov default: 19573cee6fb8SMartin KaFai Lau fn = raw_tp_prog_func_proto(func_id, prog); 19583cee6fb8SMartin KaFai Lau if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) 19593cee6fb8SMartin KaFai Lau fn = bpf_iter_get_func_proto(func_id, prog); 19603cee6fb8SMartin KaFai Lau return fn; 1961f1b9509cSAlexei Starovoitov } 1962f1b9509cSAlexei Starovoitov } 1963f1b9509cSAlexei Starovoitov 1964c4f6699dSAlexei Starovoitov static bool raw_tp_prog_is_valid_access(int off, int size, 1965c4f6699dSAlexei Starovoitov enum bpf_access_type type, 19665e43f899SAndrey Ignatov const struct bpf_prog *prog, 1967c4f6699dSAlexei Starovoitov struct bpf_insn_access_aux *info) 1968c4f6699dSAlexei Starovoitov { 196935346ab6SHou Tao return bpf_tracing_ctx_access(off, size, type); 1970f1b9509cSAlexei Starovoitov } 1971f1b9509cSAlexei Starovoitov 1972f1b9509cSAlexei Starovoitov static bool tracing_prog_is_valid_access(int off, int size, 1973f1b9509cSAlexei Starovoitov enum bpf_access_type type, 1974f1b9509cSAlexei Starovoitov const struct bpf_prog *prog, 1975f1b9509cSAlexei Starovoitov struct bpf_insn_access_aux *info) 1976f1b9509cSAlexei Starovoitov { 197735346ab6SHou Tao return bpf_tracing_btf_ctx_access(off, size, type, prog, info); 1978c4f6699dSAlexei Starovoitov } 1979c4f6699dSAlexei Starovoitov 19803e7c67d9SKP Singh int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog, 19813e7c67d9SKP Singh const union bpf_attr *kattr, 19823e7c67d9SKP Singh union bpf_attr __user *uattr) 19833e7c67d9SKP Singh { 19843e7c67d9SKP Singh return -ENOTSUPP; 19853e7c67d9SKP Singh } 19863e7c67d9SKP Singh 1987c4f6699dSAlexei Starovoitov const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { 1988c4f6699dSAlexei Starovoitov .get_func_proto = raw_tp_prog_func_proto, 1989c4f6699dSAlexei Starovoitov .is_valid_access = raw_tp_prog_is_valid_access, 1990c4f6699dSAlexei Starovoitov }; 1991c4f6699dSAlexei Starovoitov 1992c4f6699dSAlexei Starovoitov const struct bpf_prog_ops raw_tracepoint_prog_ops = { 1993ebfb4d40SYonghong Song #ifdef CONFIG_NET 19941b4d60ecSSong Liu .test_run = bpf_prog_test_run_raw_tp, 1995ebfb4d40SYonghong Song #endif 1996c4f6699dSAlexei Starovoitov }; 1997c4f6699dSAlexei Starovoitov 1998f1b9509cSAlexei Starovoitov const struct bpf_verifier_ops tracing_verifier_ops = { 1999f1b9509cSAlexei Starovoitov .get_func_proto = tracing_prog_func_proto, 2000f1b9509cSAlexei Starovoitov .is_valid_access = tracing_prog_is_valid_access, 2001f1b9509cSAlexei Starovoitov }; 2002f1b9509cSAlexei Starovoitov 2003f1b9509cSAlexei Starovoitov const struct bpf_prog_ops tracing_prog_ops = { 2004da00d2f1SKP Singh .test_run = bpf_prog_test_run_tracing, 2005f1b9509cSAlexei Starovoitov }; 2006f1b9509cSAlexei Starovoitov 20079df1c28bSMatt Mullins static bool raw_tp_writable_prog_is_valid_access(int off, int size, 20089df1c28bSMatt Mullins enum bpf_access_type type, 20099df1c28bSMatt Mullins const struct bpf_prog *prog, 20109df1c28bSMatt Mullins struct bpf_insn_access_aux *info) 20119df1c28bSMatt Mullins { 20129df1c28bSMatt Mullins if (off == 0) { 20139df1c28bSMatt Mullins if (size != sizeof(u64) || type != BPF_READ) 20149df1c28bSMatt Mullins return false; 20159df1c28bSMatt Mullins info->reg_type = PTR_TO_TP_BUFFER; 20169df1c28bSMatt Mullins } 20179df1c28bSMatt Mullins return raw_tp_prog_is_valid_access(off, size, type, prog, info); 20189df1c28bSMatt Mullins } 20199df1c28bSMatt Mullins 20209df1c28bSMatt Mullins const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = { 20219df1c28bSMatt Mullins .get_func_proto = raw_tp_prog_func_proto, 20229df1c28bSMatt Mullins .is_valid_access = raw_tp_writable_prog_is_valid_access, 20239df1c28bSMatt Mullins }; 20249df1c28bSMatt Mullins 20259df1c28bSMatt Mullins const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = { 20269df1c28bSMatt Mullins }; 20279df1c28bSMatt Mullins 20280515e599SAlexei Starovoitov static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type, 20295e43f899SAndrey Ignatov const struct bpf_prog *prog, 203023994631SYonghong Song struct bpf_insn_access_aux *info) 20310515e599SAlexei Starovoitov { 203295da0cdbSTeng Qin const int size_u64 = sizeof(u64); 203331fd8581SYonghong Song 20340515e599SAlexei Starovoitov if (off < 0 || off >= sizeof(struct bpf_perf_event_data)) 20350515e599SAlexei Starovoitov return false; 20360515e599SAlexei Starovoitov if (type != BPF_READ) 20370515e599SAlexei Starovoitov return false; 2038bc23105cSDaniel Borkmann if (off % size != 0) { 2039bc23105cSDaniel Borkmann if (sizeof(unsigned long) != 4) 20400515e599SAlexei Starovoitov return false; 2041bc23105cSDaniel Borkmann if (size != 8) 2042bc23105cSDaniel Borkmann return false; 2043bc23105cSDaniel Borkmann if (off % size != 4) 2044bc23105cSDaniel Borkmann return false; 2045bc23105cSDaniel Borkmann } 204631fd8581SYonghong Song 2047f96da094SDaniel Borkmann switch (off) { 2048f96da094SDaniel Borkmann case bpf_ctx_range(struct bpf_perf_event_data, sample_period): 204995da0cdbSTeng Qin bpf_ctx_record_field_size(info, size_u64); 205095da0cdbSTeng Qin if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 205195da0cdbSTeng Qin return false; 205295da0cdbSTeng Qin break; 205395da0cdbSTeng Qin case bpf_ctx_range(struct bpf_perf_event_data, addr): 205495da0cdbSTeng Qin bpf_ctx_record_field_size(info, size_u64); 205595da0cdbSTeng Qin if (!bpf_ctx_narrow_access_ok(off, size, size_u64)) 205623994631SYonghong Song return false; 2057f96da094SDaniel Borkmann break; 2058f96da094SDaniel Borkmann default: 20590515e599SAlexei Starovoitov if (size != sizeof(long)) 20600515e599SAlexei Starovoitov return false; 20610515e599SAlexei Starovoitov } 2062f96da094SDaniel Borkmann 20630515e599SAlexei Starovoitov return true; 20640515e599SAlexei Starovoitov } 20650515e599SAlexei Starovoitov 20666b8cc1d1SDaniel Borkmann static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, 20676b8cc1d1SDaniel Borkmann const struct bpf_insn *si, 20680515e599SAlexei Starovoitov struct bpf_insn *insn_buf, 2069f96da094SDaniel Borkmann struct bpf_prog *prog, u32 *target_size) 20700515e599SAlexei Starovoitov { 20710515e599SAlexei Starovoitov struct bpf_insn *insn = insn_buf; 20720515e599SAlexei Starovoitov 20736b8cc1d1SDaniel Borkmann switch (si->off) { 20740515e599SAlexei Starovoitov case offsetof(struct bpf_perf_event_data, sample_period): 2075f035a515SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 20766b8cc1d1SDaniel Borkmann data), si->dst_reg, si->src_reg, 20770515e599SAlexei Starovoitov offsetof(struct bpf_perf_event_data_kern, data)); 20786b8cc1d1SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 2079f96da094SDaniel Borkmann bpf_target_off(struct perf_sample_data, period, 8, 2080f96da094SDaniel Borkmann target_size)); 20810515e599SAlexei Starovoitov break; 208295da0cdbSTeng Qin case offsetof(struct bpf_perf_event_data, addr): 208395da0cdbSTeng Qin *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 208495da0cdbSTeng Qin data), si->dst_reg, si->src_reg, 208595da0cdbSTeng Qin offsetof(struct bpf_perf_event_data_kern, data)); 208695da0cdbSTeng Qin *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg, 208795da0cdbSTeng Qin bpf_target_off(struct perf_sample_data, addr, 8, 208895da0cdbSTeng Qin target_size)); 208995da0cdbSTeng Qin break; 20900515e599SAlexei Starovoitov default: 2091f035a515SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern, 20926b8cc1d1SDaniel Borkmann regs), si->dst_reg, si->src_reg, 20930515e599SAlexei Starovoitov offsetof(struct bpf_perf_event_data_kern, regs)); 20946b8cc1d1SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg, 20956b8cc1d1SDaniel Borkmann si->off); 20960515e599SAlexei Starovoitov break; 20970515e599SAlexei Starovoitov } 20980515e599SAlexei Starovoitov 20990515e599SAlexei Starovoitov return insn - insn_buf; 21000515e599SAlexei Starovoitov } 21010515e599SAlexei Starovoitov 21027de16e3aSJakub Kicinski const struct bpf_verifier_ops perf_event_verifier_ops = { 2103f005afedSYonghong Song .get_func_proto = pe_prog_func_proto, 21040515e599SAlexei Starovoitov .is_valid_access = pe_prog_is_valid_access, 21050515e599SAlexei Starovoitov .convert_ctx_access = pe_prog_convert_ctx_access, 21060515e599SAlexei Starovoitov }; 21077de16e3aSJakub Kicinski 21087de16e3aSJakub Kicinski const struct bpf_prog_ops perf_event_prog_ops = { 21097de16e3aSJakub Kicinski }; 2110e87c6bc3SYonghong Song 2111e87c6bc3SYonghong Song static DEFINE_MUTEX(bpf_event_mutex); 2112e87c6bc3SYonghong Song 2113c8c088baSYonghong Song #define BPF_TRACE_MAX_PROGS 64 2114c8c088baSYonghong Song 2115e87c6bc3SYonghong Song int perf_event_attach_bpf_prog(struct perf_event *event, 211682e6b1eeSAndrii Nakryiko struct bpf_prog *prog, 211782e6b1eeSAndrii Nakryiko u64 bpf_cookie) 2118e87c6bc3SYonghong Song { 2119e672db03SStanislav Fomichev struct bpf_prog_array *old_array; 2120e87c6bc3SYonghong Song struct bpf_prog_array *new_array; 2121e87c6bc3SYonghong Song int ret = -EEXIST; 2122e87c6bc3SYonghong Song 21239802d865SJosef Bacik /* 2124b4da3340SMasami Hiramatsu * Kprobe override only works if they are on the function entry, 2125b4da3340SMasami Hiramatsu * and only if they are on the opt-in list. 21269802d865SJosef Bacik */ 21279802d865SJosef Bacik if (prog->kprobe_override && 2128b4da3340SMasami Hiramatsu (!trace_kprobe_on_func_entry(event->tp_event) || 21299802d865SJosef Bacik !trace_kprobe_error_injectable(event->tp_event))) 21309802d865SJosef Bacik return -EINVAL; 21319802d865SJosef Bacik 2132e87c6bc3SYonghong Song mutex_lock(&bpf_event_mutex); 2133e87c6bc3SYonghong Song 2134e87c6bc3SYonghong Song if (event->prog) 213507c41a29SYonghong Song goto unlock; 2136e87c6bc3SYonghong Song 2137e672db03SStanislav Fomichev old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 2138c8c088baSYonghong Song if (old_array && 2139c8c088baSYonghong Song bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) { 2140c8c088baSYonghong Song ret = -E2BIG; 2141c8c088baSYonghong Song goto unlock; 2142c8c088baSYonghong Song } 2143c8c088baSYonghong Song 214482e6b1eeSAndrii Nakryiko ret = bpf_prog_array_copy(old_array, NULL, prog, bpf_cookie, &new_array); 2145e87c6bc3SYonghong Song if (ret < 0) 214607c41a29SYonghong Song goto unlock; 2147e87c6bc3SYonghong Song 2148e87c6bc3SYonghong Song /* set the new array to event->tp_event and set event->prog */ 2149e87c6bc3SYonghong Song event->prog = prog; 215082e6b1eeSAndrii Nakryiko event->bpf_cookie = bpf_cookie; 2151e87c6bc3SYonghong Song rcu_assign_pointer(event->tp_event->prog_array, new_array); 21528c7dcb84SDelyan Kratunov bpf_prog_array_free_sleepable(old_array); 2153e87c6bc3SYonghong Song 215407c41a29SYonghong Song unlock: 2155e87c6bc3SYonghong Song mutex_unlock(&bpf_event_mutex); 2156e87c6bc3SYonghong Song return ret; 2157e87c6bc3SYonghong Song } 2158e87c6bc3SYonghong Song 2159e87c6bc3SYonghong Song void perf_event_detach_bpf_prog(struct perf_event *event) 2160e87c6bc3SYonghong Song { 2161e672db03SStanislav Fomichev struct bpf_prog_array *old_array; 2162e87c6bc3SYonghong Song struct bpf_prog_array *new_array; 2163e87c6bc3SYonghong Song int ret; 2164e87c6bc3SYonghong Song 2165e87c6bc3SYonghong Song mutex_lock(&bpf_event_mutex); 2166e87c6bc3SYonghong Song 2167e87c6bc3SYonghong Song if (!event->prog) 216807c41a29SYonghong Song goto unlock; 2169e87c6bc3SYonghong Song 2170e672db03SStanislav Fomichev old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); 217182e6b1eeSAndrii Nakryiko ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); 2172170a7e3eSSean Young if (ret == -ENOENT) 2173170a7e3eSSean Young goto unlock; 2174e87c6bc3SYonghong Song if (ret < 0) { 2175e87c6bc3SYonghong Song bpf_prog_array_delete_safe(old_array, event->prog); 2176e87c6bc3SYonghong Song } else { 2177e87c6bc3SYonghong Song rcu_assign_pointer(event->tp_event->prog_array, new_array); 21788c7dcb84SDelyan Kratunov bpf_prog_array_free_sleepable(old_array); 2179e87c6bc3SYonghong Song } 2180e87c6bc3SYonghong Song 2181e87c6bc3SYonghong Song bpf_prog_put(event->prog); 2182e87c6bc3SYonghong Song event->prog = NULL; 2183e87c6bc3SYonghong Song 218407c41a29SYonghong Song unlock: 2185e87c6bc3SYonghong Song mutex_unlock(&bpf_event_mutex); 2186e87c6bc3SYonghong Song } 2187f371b304SYonghong Song 2188f4e2298eSYonghong Song int perf_event_query_prog_array(struct perf_event *event, void __user *info) 2189f371b304SYonghong Song { 2190f371b304SYonghong Song struct perf_event_query_bpf __user *uquery = info; 2191f371b304SYonghong Song struct perf_event_query_bpf query = {}; 2192e672db03SStanislav Fomichev struct bpf_prog_array *progs; 21933a38bb98SYonghong Song u32 *ids, prog_cnt, ids_len; 2194f371b304SYonghong Song int ret; 2195f371b304SYonghong Song 2196031258daSAlexey Budankov if (!perfmon_capable()) 2197f371b304SYonghong Song return -EPERM; 2198f371b304SYonghong Song if (event->attr.type != PERF_TYPE_TRACEPOINT) 2199f371b304SYonghong Song return -EINVAL; 2200f371b304SYonghong Song if (copy_from_user(&query, uquery, sizeof(query))) 2201f371b304SYonghong Song return -EFAULT; 22023a38bb98SYonghong Song 22033a38bb98SYonghong Song ids_len = query.ids_len; 22043a38bb98SYonghong Song if (ids_len > BPF_TRACE_MAX_PROGS) 22059c481b90SDaniel Borkmann return -E2BIG; 22063a38bb98SYonghong Song ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); 22073a38bb98SYonghong Song if (!ids) 22083a38bb98SYonghong Song return -ENOMEM; 22093a38bb98SYonghong Song /* 22103a38bb98SYonghong Song * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which 22113a38bb98SYonghong Song * is required when user only wants to check for uquery->prog_cnt. 22123a38bb98SYonghong Song * There is no need to check for it since the case is handled 22133a38bb98SYonghong Song * gracefully in bpf_prog_array_copy_info. 22143a38bb98SYonghong Song */ 2215f371b304SYonghong Song 2216f371b304SYonghong Song mutex_lock(&bpf_event_mutex); 2217e672db03SStanislav Fomichev progs = bpf_event_rcu_dereference(event->tp_event->prog_array); 2218e672db03SStanislav Fomichev ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt); 2219f371b304SYonghong Song mutex_unlock(&bpf_event_mutex); 2220f371b304SYonghong Song 22213a38bb98SYonghong Song if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || 22223a38bb98SYonghong Song copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) 22233a38bb98SYonghong Song ret = -EFAULT; 22243a38bb98SYonghong Song 22253a38bb98SYonghong Song kfree(ids); 2226f371b304SYonghong Song return ret; 2227f371b304SYonghong Song } 2228c4f6699dSAlexei Starovoitov 2229c4f6699dSAlexei Starovoitov extern struct bpf_raw_event_map __start__bpf_raw_tp[]; 2230c4f6699dSAlexei Starovoitov extern struct bpf_raw_event_map __stop__bpf_raw_tp[]; 2231c4f6699dSAlexei Starovoitov 2232a38d1107SMatt Mullins struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) 2233c4f6699dSAlexei Starovoitov { 2234c4f6699dSAlexei Starovoitov struct bpf_raw_event_map *btp = __start__bpf_raw_tp; 2235c4f6699dSAlexei Starovoitov 2236c4f6699dSAlexei Starovoitov for (; btp < __stop__bpf_raw_tp; btp++) { 2237c4f6699dSAlexei Starovoitov if (!strcmp(btp->tp->name, name)) 2238c4f6699dSAlexei Starovoitov return btp; 2239c4f6699dSAlexei Starovoitov } 2240a38d1107SMatt Mullins 2241a38d1107SMatt Mullins return bpf_get_raw_tracepoint_module(name); 2242a38d1107SMatt Mullins } 2243a38d1107SMatt Mullins 2244a38d1107SMatt Mullins void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) 2245a38d1107SMatt Mullins { 224612cc126dSAndrii Nakryiko struct module *mod; 2247a38d1107SMatt Mullins 224812cc126dSAndrii Nakryiko preempt_disable(); 224912cc126dSAndrii Nakryiko mod = __module_address((unsigned long)btp); 2250a38d1107SMatt Mullins module_put(mod); 225112cc126dSAndrii Nakryiko preempt_enable(); 2252c4f6699dSAlexei Starovoitov } 2253c4f6699dSAlexei Starovoitov 2254c4f6699dSAlexei Starovoitov static __always_inline 2255c4f6699dSAlexei Starovoitov void __bpf_trace_run(struct bpf_prog *prog, u64 *args) 2256c4f6699dSAlexei Starovoitov { 2257f03efe49SThomas Gleixner cant_sleep(); 225805b24ff9SJiri Olsa if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { 225905b24ff9SJiri Olsa bpf_prog_inc_misses_counter(prog); 226005b24ff9SJiri Olsa goto out; 226105b24ff9SJiri Olsa } 2262c4f6699dSAlexei Starovoitov rcu_read_lock(); 2263fb7dd8bcSAndrii Nakryiko (void) bpf_prog_run(prog, args); 2264c4f6699dSAlexei Starovoitov rcu_read_unlock(); 226505b24ff9SJiri Olsa out: 226605b24ff9SJiri Olsa this_cpu_dec(*(prog->active)); 2267c4f6699dSAlexei Starovoitov } 2268c4f6699dSAlexei Starovoitov 2269c4f6699dSAlexei Starovoitov #define UNPACK(...) __VA_ARGS__ 2270c4f6699dSAlexei Starovoitov #define REPEAT_1(FN, DL, X, ...) FN(X) 2271c4f6699dSAlexei Starovoitov #define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__) 2272c4f6699dSAlexei Starovoitov #define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__) 2273c4f6699dSAlexei Starovoitov #define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__) 2274c4f6699dSAlexei Starovoitov #define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__) 2275c4f6699dSAlexei Starovoitov #define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__) 2276c4f6699dSAlexei Starovoitov #define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__) 2277c4f6699dSAlexei Starovoitov #define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__) 2278c4f6699dSAlexei Starovoitov #define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__) 2279c4f6699dSAlexei Starovoitov #define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__) 2280c4f6699dSAlexei Starovoitov #define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__) 2281c4f6699dSAlexei Starovoitov #define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__) 2282c4f6699dSAlexei Starovoitov #define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__) 2283c4f6699dSAlexei Starovoitov 2284c4f6699dSAlexei Starovoitov #define SARG(X) u64 arg##X 2285c4f6699dSAlexei Starovoitov #define COPY(X) args[X] = arg##X 2286c4f6699dSAlexei Starovoitov 2287c4f6699dSAlexei Starovoitov #define __DL_COM (,) 2288c4f6699dSAlexei Starovoitov #define __DL_SEM (;) 2289c4f6699dSAlexei Starovoitov 2290c4f6699dSAlexei Starovoitov #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 2291c4f6699dSAlexei Starovoitov 2292c4f6699dSAlexei Starovoitov #define BPF_TRACE_DEFN_x(x) \ 2293c4f6699dSAlexei Starovoitov void bpf_trace_run##x(struct bpf_prog *prog, \ 2294c4f6699dSAlexei Starovoitov REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ 2295c4f6699dSAlexei Starovoitov { \ 2296c4f6699dSAlexei Starovoitov u64 args[x]; \ 2297c4f6699dSAlexei Starovoitov REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ 2298c4f6699dSAlexei Starovoitov __bpf_trace_run(prog, args); \ 2299c4f6699dSAlexei Starovoitov } \ 2300c4f6699dSAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_trace_run##x) 2301c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(1); 2302c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(2); 2303c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(3); 2304c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(4); 2305c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(5); 2306c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(6); 2307c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(7); 2308c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(8); 2309c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(9); 2310c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(10); 2311c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(11); 2312c4f6699dSAlexei Starovoitov BPF_TRACE_DEFN_x(12); 2313c4f6699dSAlexei Starovoitov 2314c4f6699dSAlexei Starovoitov static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2315c4f6699dSAlexei Starovoitov { 2316c4f6699dSAlexei Starovoitov struct tracepoint *tp = btp->tp; 2317c4f6699dSAlexei Starovoitov 2318c4f6699dSAlexei Starovoitov /* 2319c4f6699dSAlexei Starovoitov * check that program doesn't access arguments beyond what's 2320c4f6699dSAlexei Starovoitov * available in this tracepoint 2321c4f6699dSAlexei Starovoitov */ 2322c4f6699dSAlexei Starovoitov if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64)) 2323c4f6699dSAlexei Starovoitov return -EINVAL; 2324c4f6699dSAlexei Starovoitov 23259df1c28bSMatt Mullins if (prog->aux->max_tp_access > btp->writable_size) 23269df1c28bSMatt Mullins return -EINVAL; 23279df1c28bSMatt Mullins 23289913d574SSteven Rostedt (VMware) return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, 23299913d574SSteven Rostedt (VMware) prog); 2330c4f6699dSAlexei Starovoitov } 2331c4f6699dSAlexei Starovoitov 2332c4f6699dSAlexei Starovoitov int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2333c4f6699dSAlexei Starovoitov { 2334e16ec340SAlexei Starovoitov return __bpf_probe_register(btp, prog); 2335c4f6699dSAlexei Starovoitov } 2336c4f6699dSAlexei Starovoitov 2337c4f6699dSAlexei Starovoitov int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) 2338c4f6699dSAlexei Starovoitov { 2339e16ec340SAlexei Starovoitov return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); 2340c4f6699dSAlexei Starovoitov } 234141bdc4b4SYonghong Song 234241bdc4b4SYonghong Song int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, 234341bdc4b4SYonghong Song u32 *fd_type, const char **buf, 234441bdc4b4SYonghong Song u64 *probe_offset, u64 *probe_addr) 234541bdc4b4SYonghong Song { 234641bdc4b4SYonghong Song bool is_tracepoint, is_syscall_tp; 234741bdc4b4SYonghong Song struct bpf_prog *prog; 234841bdc4b4SYonghong Song int flags, err = 0; 234941bdc4b4SYonghong Song 235041bdc4b4SYonghong Song prog = event->prog; 235141bdc4b4SYonghong Song if (!prog) 235241bdc4b4SYonghong Song return -ENOENT; 235341bdc4b4SYonghong Song 235441bdc4b4SYonghong Song /* not supporting BPF_PROG_TYPE_PERF_EVENT yet */ 235541bdc4b4SYonghong Song if (prog->type == BPF_PROG_TYPE_PERF_EVENT) 235641bdc4b4SYonghong Song return -EOPNOTSUPP; 235741bdc4b4SYonghong Song 235841bdc4b4SYonghong Song *prog_id = prog->aux->id; 235941bdc4b4SYonghong Song flags = event->tp_event->flags; 236041bdc4b4SYonghong Song is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT; 236141bdc4b4SYonghong Song is_syscall_tp = is_syscall_trace_event(event->tp_event); 236241bdc4b4SYonghong Song 236341bdc4b4SYonghong Song if (is_tracepoint || is_syscall_tp) { 236441bdc4b4SYonghong Song *buf = is_tracepoint ? event->tp_event->tp->name 236541bdc4b4SYonghong Song : event->tp_event->name; 236641bdc4b4SYonghong Song *fd_type = BPF_FD_TYPE_TRACEPOINT; 236741bdc4b4SYonghong Song *probe_offset = 0x0; 236841bdc4b4SYonghong Song *probe_addr = 0x0; 236941bdc4b4SYonghong Song } else { 237041bdc4b4SYonghong Song /* kprobe/uprobe */ 237141bdc4b4SYonghong Song err = -EOPNOTSUPP; 237241bdc4b4SYonghong Song #ifdef CONFIG_KPROBE_EVENTS 237341bdc4b4SYonghong Song if (flags & TRACE_EVENT_FL_KPROBE) 237441bdc4b4SYonghong Song err = bpf_get_kprobe_info(event, fd_type, buf, 237541bdc4b4SYonghong Song probe_offset, probe_addr, 237641bdc4b4SYonghong Song event->attr.type == PERF_TYPE_TRACEPOINT); 237741bdc4b4SYonghong Song #endif 237841bdc4b4SYonghong Song #ifdef CONFIG_UPROBE_EVENTS 237941bdc4b4SYonghong Song if (flags & TRACE_EVENT_FL_UPROBE) 238041bdc4b4SYonghong Song err = bpf_get_uprobe_info(event, fd_type, buf, 238141bdc4b4SYonghong Song probe_offset, 238241bdc4b4SYonghong Song event->attr.type == PERF_TYPE_TRACEPOINT); 238341bdc4b4SYonghong Song #endif 238441bdc4b4SYonghong Song } 238541bdc4b4SYonghong Song 238641bdc4b4SYonghong Song return err; 238741bdc4b4SYonghong Song } 2388a38d1107SMatt Mullins 23899db1ff0aSYonghong Song static int __init send_signal_irq_work_init(void) 23909db1ff0aSYonghong Song { 23919db1ff0aSYonghong Song int cpu; 23929db1ff0aSYonghong Song struct send_signal_irq_work *work; 23939db1ff0aSYonghong Song 23949db1ff0aSYonghong Song for_each_possible_cpu(cpu) { 23959db1ff0aSYonghong Song work = per_cpu_ptr(&send_signal_work, cpu); 23969db1ff0aSYonghong Song init_irq_work(&work->irq_work, do_bpf_send_signal); 23979db1ff0aSYonghong Song } 23989db1ff0aSYonghong Song return 0; 23999db1ff0aSYonghong Song } 24009db1ff0aSYonghong Song 24019db1ff0aSYonghong Song subsys_initcall(send_signal_irq_work_init); 24029db1ff0aSYonghong Song 2403a38d1107SMatt Mullins #ifdef CONFIG_MODULES 2404390e99cfSStanislav Fomichev static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 2405390e99cfSStanislav Fomichev void *module) 2406a38d1107SMatt Mullins { 2407a38d1107SMatt Mullins struct bpf_trace_module *btm, *tmp; 2408a38d1107SMatt Mullins struct module *mod = module; 24090340a6b7SPeter Zijlstra int ret = 0; 2410a38d1107SMatt Mullins 2411a38d1107SMatt Mullins if (mod->num_bpf_raw_events == 0 || 2412a38d1107SMatt Mullins (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) 24130340a6b7SPeter Zijlstra goto out; 2414a38d1107SMatt Mullins 2415a38d1107SMatt Mullins mutex_lock(&bpf_module_mutex); 2416a38d1107SMatt Mullins 2417a38d1107SMatt Mullins switch (op) { 2418a38d1107SMatt Mullins case MODULE_STATE_COMING: 2419a38d1107SMatt Mullins btm = kzalloc(sizeof(*btm), GFP_KERNEL); 2420a38d1107SMatt Mullins if (btm) { 2421a38d1107SMatt Mullins btm->module = module; 2422a38d1107SMatt Mullins list_add(&btm->list, &bpf_trace_modules); 24230340a6b7SPeter Zijlstra } else { 24240340a6b7SPeter Zijlstra ret = -ENOMEM; 2425a38d1107SMatt Mullins } 2426a38d1107SMatt Mullins break; 2427a38d1107SMatt Mullins case MODULE_STATE_GOING: 2428a38d1107SMatt Mullins list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) { 2429a38d1107SMatt Mullins if (btm->module == module) { 2430a38d1107SMatt Mullins list_del(&btm->list); 2431a38d1107SMatt Mullins kfree(btm); 2432a38d1107SMatt Mullins break; 2433a38d1107SMatt Mullins } 2434a38d1107SMatt Mullins } 2435a38d1107SMatt Mullins break; 2436a38d1107SMatt Mullins } 2437a38d1107SMatt Mullins 2438a38d1107SMatt Mullins mutex_unlock(&bpf_module_mutex); 2439a38d1107SMatt Mullins 24400340a6b7SPeter Zijlstra out: 24410340a6b7SPeter Zijlstra return notifier_from_errno(ret); 2442a38d1107SMatt Mullins } 2443a38d1107SMatt Mullins 2444a38d1107SMatt Mullins static struct notifier_block bpf_module_nb = { 2445a38d1107SMatt Mullins .notifier_call = bpf_event_notify, 2446a38d1107SMatt Mullins }; 2447a38d1107SMatt Mullins 2448390e99cfSStanislav Fomichev static int __init bpf_event_init(void) 2449a38d1107SMatt Mullins { 2450a38d1107SMatt Mullins register_module_notifier(&bpf_module_nb); 2451a38d1107SMatt Mullins return 0; 2452a38d1107SMatt Mullins } 2453a38d1107SMatt Mullins 2454a38d1107SMatt Mullins fs_initcall(bpf_event_init); 2455a38d1107SMatt Mullins #endif /* CONFIG_MODULES */ 24560dcac272SJiri Olsa 24570dcac272SJiri Olsa #ifdef CONFIG_FPROBE 24580dcac272SJiri Olsa struct bpf_kprobe_multi_link { 24590dcac272SJiri Olsa struct bpf_link link; 24600dcac272SJiri Olsa struct fprobe fp; 24610dcac272SJiri Olsa unsigned long *addrs; 2462ca74823cSJiri Olsa u64 *cookies; 2463ca74823cSJiri Olsa u32 cnt; 2464e22061b2SJiri Olsa u32 mods_cnt; 2465e22061b2SJiri Olsa struct module **mods; 24660dcac272SJiri Olsa }; 24670dcac272SJiri Olsa 2468f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx { 2469f7098690SJiri Olsa struct bpf_run_ctx run_ctx; 2470f7098690SJiri Olsa struct bpf_kprobe_multi_link *link; 2471f7098690SJiri Olsa unsigned long entry_ip; 2472f7098690SJiri Olsa }; 2473f7098690SJiri Olsa 24740236fec5SJiri Olsa struct user_syms { 24750236fec5SJiri Olsa const char **syms; 24760236fec5SJiri Olsa char *buf; 24770236fec5SJiri Olsa }; 24780236fec5SJiri Olsa 24790236fec5SJiri Olsa static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt) 24800236fec5SJiri Olsa { 24810236fec5SJiri Olsa unsigned long __user usymbol; 24820236fec5SJiri Olsa const char **syms = NULL; 24830236fec5SJiri Olsa char *buf = NULL, *p; 24840236fec5SJiri Olsa int err = -ENOMEM; 24850236fec5SJiri Olsa unsigned int i; 24860236fec5SJiri Olsa 2487fd58f7dfSDan Carpenter syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); 24880236fec5SJiri Olsa if (!syms) 24890236fec5SJiri Olsa goto error; 24900236fec5SJiri Olsa 2491fd58f7dfSDan Carpenter buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); 24920236fec5SJiri Olsa if (!buf) 24930236fec5SJiri Olsa goto error; 24940236fec5SJiri Olsa 24950236fec5SJiri Olsa for (p = buf, i = 0; i < cnt; i++) { 24960236fec5SJiri Olsa if (__get_user(usymbol, usyms + i)) { 24970236fec5SJiri Olsa err = -EFAULT; 24980236fec5SJiri Olsa goto error; 24990236fec5SJiri Olsa } 25000236fec5SJiri Olsa err = strncpy_from_user(p, (const char __user *) usymbol, KSYM_NAME_LEN); 25010236fec5SJiri Olsa if (err == KSYM_NAME_LEN) 25020236fec5SJiri Olsa err = -E2BIG; 25030236fec5SJiri Olsa if (err < 0) 25040236fec5SJiri Olsa goto error; 25050236fec5SJiri Olsa syms[i] = p; 25060236fec5SJiri Olsa p += err + 1; 25070236fec5SJiri Olsa } 25080236fec5SJiri Olsa 25090236fec5SJiri Olsa us->syms = syms; 25100236fec5SJiri Olsa us->buf = buf; 25110236fec5SJiri Olsa return 0; 25120236fec5SJiri Olsa 25130236fec5SJiri Olsa error: 25140236fec5SJiri Olsa if (err) { 25150236fec5SJiri Olsa kvfree(syms); 25160236fec5SJiri Olsa kvfree(buf); 25170236fec5SJiri Olsa } 25180236fec5SJiri Olsa return err; 25190236fec5SJiri Olsa } 25200236fec5SJiri Olsa 2521e22061b2SJiri Olsa static void kprobe_multi_put_modules(struct module **mods, u32 cnt) 2522e22061b2SJiri Olsa { 2523e22061b2SJiri Olsa u32 i; 2524e22061b2SJiri Olsa 2525e22061b2SJiri Olsa for (i = 0; i < cnt; i++) 2526e22061b2SJiri Olsa module_put(mods[i]); 2527e22061b2SJiri Olsa } 2528e22061b2SJiri Olsa 25290236fec5SJiri Olsa static void free_user_syms(struct user_syms *us) 25300236fec5SJiri Olsa { 25310236fec5SJiri Olsa kvfree(us->syms); 25320236fec5SJiri Olsa kvfree(us->buf); 25330236fec5SJiri Olsa } 25340236fec5SJiri Olsa 25350dcac272SJiri Olsa static void bpf_kprobe_multi_link_release(struct bpf_link *link) 25360dcac272SJiri Olsa { 25370dcac272SJiri Olsa struct bpf_kprobe_multi_link *kmulti_link; 25380dcac272SJiri Olsa 25390dcac272SJiri Olsa kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 25400dcac272SJiri Olsa unregister_fprobe(&kmulti_link->fp); 2541e22061b2SJiri Olsa kprobe_multi_put_modules(kmulti_link->mods, kmulti_link->mods_cnt); 25420dcac272SJiri Olsa } 25430dcac272SJiri Olsa 25440dcac272SJiri Olsa static void bpf_kprobe_multi_link_dealloc(struct bpf_link *link) 25450dcac272SJiri Olsa { 25460dcac272SJiri Olsa struct bpf_kprobe_multi_link *kmulti_link; 25470dcac272SJiri Olsa 25480dcac272SJiri Olsa kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link); 25490dcac272SJiri Olsa kvfree(kmulti_link->addrs); 2550ca74823cSJiri Olsa kvfree(kmulti_link->cookies); 2551e22061b2SJiri Olsa kfree(kmulti_link->mods); 25520dcac272SJiri Olsa kfree(kmulti_link); 25530dcac272SJiri Olsa } 25540dcac272SJiri Olsa 25550dcac272SJiri Olsa static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { 25560dcac272SJiri Olsa .release = bpf_kprobe_multi_link_release, 25570dcac272SJiri Olsa .dealloc = bpf_kprobe_multi_link_dealloc, 25580dcac272SJiri Olsa }; 25590dcac272SJiri Olsa 2560ca74823cSJiri Olsa static void bpf_kprobe_multi_cookie_swap(void *a, void *b, int size, const void *priv) 2561ca74823cSJiri Olsa { 2562ca74823cSJiri Olsa const struct bpf_kprobe_multi_link *link = priv; 2563ca74823cSJiri Olsa unsigned long *addr_a = a, *addr_b = b; 2564ca74823cSJiri Olsa u64 *cookie_a, *cookie_b; 2565ca74823cSJiri Olsa 2566ca74823cSJiri Olsa cookie_a = link->cookies + (addr_a - link->addrs); 2567ca74823cSJiri Olsa cookie_b = link->cookies + (addr_b - link->addrs); 2568ca74823cSJiri Olsa 2569ca74823cSJiri Olsa /* swap addr_a/addr_b and cookie_a/cookie_b values */ 257011e17ae4SJiapeng Chong swap(*addr_a, *addr_b); 257111e17ae4SJiapeng Chong swap(*cookie_a, *cookie_b); 2572ca74823cSJiri Olsa } 2573ca74823cSJiri Olsa 25741a1b0716SJiri Olsa static int bpf_kprobe_multi_addrs_cmp(const void *a, const void *b) 2575ca74823cSJiri Olsa { 2576ca74823cSJiri Olsa const unsigned long *addr_a = a, *addr_b = b; 2577ca74823cSJiri Olsa 2578ca74823cSJiri Olsa if (*addr_a == *addr_b) 2579ca74823cSJiri Olsa return 0; 2580ca74823cSJiri Olsa return *addr_a < *addr_b ? -1 : 1; 2581ca74823cSJiri Olsa } 2582ca74823cSJiri Olsa 2583ca74823cSJiri Olsa static int bpf_kprobe_multi_cookie_cmp(const void *a, const void *b, const void *priv) 2584ca74823cSJiri Olsa { 25851a1b0716SJiri Olsa return bpf_kprobe_multi_addrs_cmp(a, b); 2586ca74823cSJiri Olsa } 2587ca74823cSJiri Olsa 2588f7098690SJiri Olsa static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2589ca74823cSJiri Olsa { 2590f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx *run_ctx; 2591ca74823cSJiri Olsa struct bpf_kprobe_multi_link *link; 2592f7098690SJiri Olsa u64 *cookie, entry_ip; 2593ca74823cSJiri Olsa unsigned long *addr; 2594ca74823cSJiri Olsa 2595ca74823cSJiri Olsa if (WARN_ON_ONCE(!ctx)) 2596ca74823cSJiri Olsa return 0; 2597f7098690SJiri Olsa run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2598f7098690SJiri Olsa link = run_ctx->link; 2599ca74823cSJiri Olsa if (!link->cookies) 2600ca74823cSJiri Olsa return 0; 2601f7098690SJiri Olsa entry_ip = run_ctx->entry_ip; 2602f7098690SJiri Olsa addr = bsearch(&entry_ip, link->addrs, link->cnt, sizeof(entry_ip), 26031a1b0716SJiri Olsa bpf_kprobe_multi_addrs_cmp); 2604ca74823cSJiri Olsa if (!addr) 2605ca74823cSJiri Olsa return 0; 2606ca74823cSJiri Olsa cookie = link->cookies + (addr - link->addrs); 2607ca74823cSJiri Olsa return *cookie; 2608ca74823cSJiri Olsa } 2609ca74823cSJiri Olsa 2610f7098690SJiri Olsa static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2611f7098690SJiri Olsa { 2612f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx *run_ctx; 2613f7098690SJiri Olsa 2614f7098690SJiri Olsa run_ctx = container_of(current->bpf_ctx, struct bpf_kprobe_multi_run_ctx, run_ctx); 2615f7098690SJiri Olsa return run_ctx->entry_ip; 2616f7098690SJiri Olsa } 2617f7098690SJiri Olsa 26180dcac272SJiri Olsa static int 26190dcac272SJiri Olsa kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link, 2620f7098690SJiri Olsa unsigned long entry_ip, struct pt_regs *regs) 26210dcac272SJiri Olsa { 2622f7098690SJiri Olsa struct bpf_kprobe_multi_run_ctx run_ctx = { 2623f7098690SJiri Olsa .link = link, 2624f7098690SJiri Olsa .entry_ip = entry_ip, 2625f7098690SJiri Olsa }; 2626ca74823cSJiri Olsa struct bpf_run_ctx *old_run_ctx; 26270dcac272SJiri Olsa int err; 26280dcac272SJiri Olsa 26290dcac272SJiri Olsa if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { 26300dcac272SJiri Olsa err = 0; 26310dcac272SJiri Olsa goto out; 26320dcac272SJiri Olsa } 26330dcac272SJiri Olsa 26340dcac272SJiri Olsa migrate_disable(); 26350dcac272SJiri Olsa rcu_read_lock(); 2636f7098690SJiri Olsa old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); 26370dcac272SJiri Olsa err = bpf_prog_run(link->link.prog, regs); 2638ca74823cSJiri Olsa bpf_reset_run_ctx(old_run_ctx); 26390dcac272SJiri Olsa rcu_read_unlock(); 26400dcac272SJiri Olsa migrate_enable(); 26410dcac272SJiri Olsa 26420dcac272SJiri Olsa out: 26430dcac272SJiri Olsa __this_cpu_dec(bpf_prog_active); 26440dcac272SJiri Olsa return err; 26450dcac272SJiri Olsa } 26460dcac272SJiri Olsa 26470dcac272SJiri Olsa static void 2648c09eb2e5SJiri Olsa kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip, 26490dcac272SJiri Olsa struct pt_regs *regs) 26500dcac272SJiri Olsa { 26510dcac272SJiri Olsa struct bpf_kprobe_multi_link *link; 26520dcac272SJiri Olsa 26530dcac272SJiri Olsa link = container_of(fp, struct bpf_kprobe_multi_link, fp); 2654c09eb2e5SJiri Olsa kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs); 26550dcac272SJiri Olsa } 26560dcac272SJiri Olsa 2657eb5fb032SJiri Olsa static int symbols_cmp_r(const void *a, const void *b, const void *priv) 26580dcac272SJiri Olsa { 26590236fec5SJiri Olsa const char **str_a = (const char **) a; 26600236fec5SJiri Olsa const char **str_b = (const char **) b; 26610dcac272SJiri Olsa 26620236fec5SJiri Olsa return strcmp(*str_a, *str_b); 26630dcac272SJiri Olsa } 26640dcac272SJiri Olsa 2665eb5fb032SJiri Olsa struct multi_symbols_sort { 2666eb5fb032SJiri Olsa const char **funcs; 2667eb5fb032SJiri Olsa u64 *cookies; 2668eb5fb032SJiri Olsa }; 2669eb5fb032SJiri Olsa 2670eb5fb032SJiri Olsa static void symbols_swap_r(void *a, void *b, int size, const void *priv) 2671eb5fb032SJiri Olsa { 2672eb5fb032SJiri Olsa const struct multi_symbols_sort *data = priv; 2673eb5fb032SJiri Olsa const char **name_a = a, **name_b = b; 2674eb5fb032SJiri Olsa 2675eb5fb032SJiri Olsa swap(*name_a, *name_b); 2676eb5fb032SJiri Olsa 2677eb5fb032SJiri Olsa /* If defined, swap also related cookies. */ 2678eb5fb032SJiri Olsa if (data->cookies) { 2679eb5fb032SJiri Olsa u64 *cookie_a, *cookie_b; 2680eb5fb032SJiri Olsa 2681eb5fb032SJiri Olsa cookie_a = data->cookies + (name_a - data->funcs); 2682eb5fb032SJiri Olsa cookie_b = data->cookies + (name_b - data->funcs); 2683eb5fb032SJiri Olsa swap(*cookie_a, *cookie_b); 2684eb5fb032SJiri Olsa } 2685eb5fb032SJiri Olsa } 2686eb5fb032SJiri Olsa 2687e22061b2SJiri Olsa struct module_addr_args { 2688e22061b2SJiri Olsa unsigned long *addrs; 2689e22061b2SJiri Olsa u32 addrs_cnt; 2690e22061b2SJiri Olsa struct module **mods; 2691e22061b2SJiri Olsa int mods_cnt; 2692e22061b2SJiri Olsa int mods_cap; 2693e22061b2SJiri Olsa }; 2694e22061b2SJiri Olsa 2695e22061b2SJiri Olsa static int module_callback(void *data, const char *name, 2696e22061b2SJiri Olsa struct module *mod, unsigned long addr) 2697e22061b2SJiri Olsa { 2698e22061b2SJiri Olsa struct module_addr_args *args = data; 2699e22061b2SJiri Olsa struct module **mods; 2700e22061b2SJiri Olsa 2701e22061b2SJiri Olsa /* We iterate all modules symbols and for each we: 2702e22061b2SJiri Olsa * - search for it in provided addresses array 2703e22061b2SJiri Olsa * - if found we check if we already have the module pointer stored 2704e22061b2SJiri Olsa * (we iterate modules sequentially, so we can check just the last 2705e22061b2SJiri Olsa * module pointer) 2706e22061b2SJiri Olsa * - take module reference and store it 2707e22061b2SJiri Olsa */ 2708e22061b2SJiri Olsa if (!bsearch(&addr, args->addrs, args->addrs_cnt, sizeof(addr), 2709e22061b2SJiri Olsa bpf_kprobe_multi_addrs_cmp)) 2710e22061b2SJiri Olsa return 0; 2711e22061b2SJiri Olsa 2712e22061b2SJiri Olsa if (args->mods && args->mods[args->mods_cnt - 1] == mod) 2713e22061b2SJiri Olsa return 0; 2714e22061b2SJiri Olsa 2715e22061b2SJiri Olsa if (args->mods_cnt == args->mods_cap) { 2716e22061b2SJiri Olsa args->mods_cap = max(16, args->mods_cap * 3 / 2); 2717e22061b2SJiri Olsa mods = krealloc_array(args->mods, args->mods_cap, sizeof(*mods), GFP_KERNEL); 2718e22061b2SJiri Olsa if (!mods) 2719e22061b2SJiri Olsa return -ENOMEM; 2720e22061b2SJiri Olsa args->mods = mods; 2721e22061b2SJiri Olsa } 2722e22061b2SJiri Olsa 2723e22061b2SJiri Olsa if (!try_module_get(mod)) 2724e22061b2SJiri Olsa return -EINVAL; 2725e22061b2SJiri Olsa 2726e22061b2SJiri Olsa args->mods[args->mods_cnt] = mod; 2727e22061b2SJiri Olsa args->mods_cnt++; 2728e22061b2SJiri Olsa return 0; 2729e22061b2SJiri Olsa } 2730e22061b2SJiri Olsa 2731e22061b2SJiri Olsa static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u32 addrs_cnt) 2732e22061b2SJiri Olsa { 2733e22061b2SJiri Olsa struct module_addr_args args = { 2734e22061b2SJiri Olsa .addrs = addrs, 2735e22061b2SJiri Olsa .addrs_cnt = addrs_cnt, 2736e22061b2SJiri Olsa }; 2737e22061b2SJiri Olsa int err; 2738e22061b2SJiri Olsa 2739e22061b2SJiri Olsa /* We return either err < 0 in case of error, ... */ 2740e22061b2SJiri Olsa err = module_kallsyms_on_each_symbol(module_callback, &args); 2741e22061b2SJiri Olsa if (err) { 2742e22061b2SJiri Olsa kprobe_multi_put_modules(args.mods, args.mods_cnt); 2743e22061b2SJiri Olsa kfree(args.mods); 2744e22061b2SJiri Olsa return err; 2745e22061b2SJiri Olsa } 2746e22061b2SJiri Olsa 2747e22061b2SJiri Olsa /* or number of modules found if everything is ok. */ 2748e22061b2SJiri Olsa *mods = args.mods; 2749e22061b2SJiri Olsa return args.mods_cnt; 2750e22061b2SJiri Olsa } 2751e22061b2SJiri Olsa 27520dcac272SJiri Olsa int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 27530dcac272SJiri Olsa { 27540dcac272SJiri Olsa struct bpf_kprobe_multi_link *link = NULL; 27550dcac272SJiri Olsa struct bpf_link_primer link_primer; 2756ca74823cSJiri Olsa void __user *ucookies; 27570dcac272SJiri Olsa unsigned long *addrs; 27580dcac272SJiri Olsa u32 flags, cnt, size; 27590dcac272SJiri Olsa void __user *uaddrs; 2760ca74823cSJiri Olsa u64 *cookies = NULL; 27610dcac272SJiri Olsa void __user *usyms; 27620dcac272SJiri Olsa int err; 27630dcac272SJiri Olsa 27640dcac272SJiri Olsa /* no support for 32bit archs yet */ 27650dcac272SJiri Olsa if (sizeof(u64) != sizeof(void *)) 27660dcac272SJiri Olsa return -EOPNOTSUPP; 27670dcac272SJiri Olsa 27680dcac272SJiri Olsa if (prog->expected_attach_type != BPF_TRACE_KPROBE_MULTI) 27690dcac272SJiri Olsa return -EINVAL; 27700dcac272SJiri Olsa 27710dcac272SJiri Olsa flags = attr->link_create.kprobe_multi.flags; 27720dcac272SJiri Olsa if (flags & ~BPF_F_KPROBE_MULTI_RETURN) 27730dcac272SJiri Olsa return -EINVAL; 27740dcac272SJiri Olsa 27750dcac272SJiri Olsa uaddrs = u64_to_user_ptr(attr->link_create.kprobe_multi.addrs); 27760dcac272SJiri Olsa usyms = u64_to_user_ptr(attr->link_create.kprobe_multi.syms); 27770dcac272SJiri Olsa if (!!uaddrs == !!usyms) 27780dcac272SJiri Olsa return -EINVAL; 27790dcac272SJiri Olsa 27800dcac272SJiri Olsa cnt = attr->link_create.kprobe_multi.cnt; 27810dcac272SJiri Olsa if (!cnt) 27820dcac272SJiri Olsa return -EINVAL; 27830dcac272SJiri Olsa 27840dcac272SJiri Olsa size = cnt * sizeof(*addrs); 2785fd58f7dfSDan Carpenter addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 27860dcac272SJiri Olsa if (!addrs) 27870dcac272SJiri Olsa return -ENOMEM; 27880dcac272SJiri Olsa 2789ca74823cSJiri Olsa ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); 2790ca74823cSJiri Olsa if (ucookies) { 2791fd58f7dfSDan Carpenter cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); 2792ca74823cSJiri Olsa if (!cookies) { 2793ca74823cSJiri Olsa err = -ENOMEM; 2794ca74823cSJiri Olsa goto error; 2795ca74823cSJiri Olsa } 2796ca74823cSJiri Olsa if (copy_from_user(cookies, ucookies, size)) { 2797ca74823cSJiri Olsa err = -EFAULT; 2798ca74823cSJiri Olsa goto error; 2799ca74823cSJiri Olsa } 2800ca74823cSJiri Olsa } 2801ca74823cSJiri Olsa 2802eb5fb032SJiri Olsa if (uaddrs) { 2803eb5fb032SJiri Olsa if (copy_from_user(addrs, uaddrs, size)) { 2804eb5fb032SJiri Olsa err = -EFAULT; 2805eb5fb032SJiri Olsa goto error; 2806eb5fb032SJiri Olsa } 2807eb5fb032SJiri Olsa } else { 2808eb5fb032SJiri Olsa struct multi_symbols_sort data = { 2809eb5fb032SJiri Olsa .cookies = cookies, 2810eb5fb032SJiri Olsa }; 2811eb5fb032SJiri Olsa struct user_syms us; 2812eb5fb032SJiri Olsa 2813eb5fb032SJiri Olsa err = copy_user_syms(&us, usyms, cnt); 2814eb5fb032SJiri Olsa if (err) 2815eb5fb032SJiri Olsa goto error; 2816eb5fb032SJiri Olsa 2817eb5fb032SJiri Olsa if (cookies) 2818eb5fb032SJiri Olsa data.funcs = us.syms; 2819eb5fb032SJiri Olsa 2820eb5fb032SJiri Olsa sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r, 2821eb5fb032SJiri Olsa symbols_swap_r, &data); 2822eb5fb032SJiri Olsa 2823eb5fb032SJiri Olsa err = ftrace_lookup_symbols(us.syms, cnt, addrs); 2824eb5fb032SJiri Olsa free_user_syms(&us); 2825eb5fb032SJiri Olsa if (err) 2826eb5fb032SJiri Olsa goto error; 2827eb5fb032SJiri Olsa } 2828eb5fb032SJiri Olsa 28290dcac272SJiri Olsa link = kzalloc(sizeof(*link), GFP_KERNEL); 28300dcac272SJiri Olsa if (!link) { 28310dcac272SJiri Olsa err = -ENOMEM; 28320dcac272SJiri Olsa goto error; 28330dcac272SJiri Olsa } 28340dcac272SJiri Olsa 28350dcac272SJiri Olsa bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 28360dcac272SJiri Olsa &bpf_kprobe_multi_link_lops, prog); 28370dcac272SJiri Olsa 28380dcac272SJiri Olsa err = bpf_link_prime(&link->link, &link_primer); 28390dcac272SJiri Olsa if (err) 28400dcac272SJiri Olsa goto error; 28410dcac272SJiri Olsa 28420dcac272SJiri Olsa if (flags & BPF_F_KPROBE_MULTI_RETURN) 28430dcac272SJiri Olsa link->fp.exit_handler = kprobe_multi_link_handler; 28440dcac272SJiri Olsa else 28450dcac272SJiri Olsa link->fp.entry_handler = kprobe_multi_link_handler; 28460dcac272SJiri Olsa 28470dcac272SJiri Olsa link->addrs = addrs; 2848ca74823cSJiri Olsa link->cookies = cookies; 2849ca74823cSJiri Olsa link->cnt = cnt; 2850ca74823cSJiri Olsa 2851ca74823cSJiri Olsa if (cookies) { 2852ca74823cSJiri Olsa /* 2853ca74823cSJiri Olsa * Sorting addresses will trigger sorting cookies as well 2854ca74823cSJiri Olsa * (check bpf_kprobe_multi_cookie_swap). This way we can 2855ca74823cSJiri Olsa * find cookie based on the address in bpf_get_attach_cookie 2856ca74823cSJiri Olsa * helper. 2857ca74823cSJiri Olsa */ 2858ca74823cSJiri Olsa sort_r(addrs, cnt, sizeof(*addrs), 2859ca74823cSJiri Olsa bpf_kprobe_multi_cookie_cmp, 2860ca74823cSJiri Olsa bpf_kprobe_multi_cookie_swap, 2861ca74823cSJiri Olsa link); 2862e22061b2SJiri Olsa } else { 2863e22061b2SJiri Olsa /* 2864e22061b2SJiri Olsa * We need to sort addrs array even if there are no cookies 2865e22061b2SJiri Olsa * provided, to allow bsearch in get_modules_for_addrs. 2866e22061b2SJiri Olsa */ 2867e22061b2SJiri Olsa sort(addrs, cnt, sizeof(*addrs), 2868e22061b2SJiri Olsa bpf_kprobe_multi_addrs_cmp, NULL); 2869ca74823cSJiri Olsa } 28700dcac272SJiri Olsa 2871e22061b2SJiri Olsa err = get_modules_for_addrs(&link->mods, addrs, cnt); 2872e22061b2SJiri Olsa if (err < 0) { 2873e22061b2SJiri Olsa bpf_link_cleanup(&link_primer); 2874e22061b2SJiri Olsa return err; 2875e22061b2SJiri Olsa } 2876e22061b2SJiri Olsa link->mods_cnt = err; 2877e22061b2SJiri Olsa 28780dcac272SJiri Olsa err = register_fprobe_ips(&link->fp, addrs, cnt); 28790dcac272SJiri Olsa if (err) { 2880e22061b2SJiri Olsa kprobe_multi_put_modules(link->mods, link->mods_cnt); 28810dcac272SJiri Olsa bpf_link_cleanup(&link_primer); 28820dcac272SJiri Olsa return err; 28830dcac272SJiri Olsa } 28840dcac272SJiri Olsa 28850dcac272SJiri Olsa return bpf_link_settle(&link_primer); 28860dcac272SJiri Olsa 28870dcac272SJiri Olsa error: 28880dcac272SJiri Olsa kfree(link); 28890dcac272SJiri Olsa kvfree(addrs); 2890ca74823cSJiri Olsa kvfree(cookies); 28910dcac272SJiri Olsa return err; 28920dcac272SJiri Olsa } 28930dcac272SJiri Olsa #else /* !CONFIG_FPROBE */ 28940dcac272SJiri Olsa int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) 28950dcac272SJiri Olsa { 28960dcac272SJiri Olsa return -EOPNOTSUPP; 28970dcac272SJiri Olsa } 2898f7098690SJiri Olsa static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx) 2899f7098690SJiri Olsa { 2900f7098690SJiri Olsa return 0; 2901f7098690SJiri Olsa } 2902f7098690SJiri Olsa static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx) 2903ca74823cSJiri Olsa { 2904ca74823cSJiri Olsa return 0; 2905ca74823cSJiri Olsa } 29060dcac272SJiri Olsa #endif 2907