/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include "trace.h"

/**
 * trace_call_bpf - invoke BPF program
 * @prog: BPF program
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
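
/*
 * Illustrative caller-side sketch of the contract documented above (not a
 * verbatim copy of any handler): a kprobe handler with an attached program
 * is expected to do roughly
 *
 *	if (prog && !trace_call_bpf(prog, regs))
 *		return;		/* filtered out, nothing is recorded *​/
 *	/* non-zero return: emit the kprobe event as usual *​/
 */
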
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
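
/*
 * BPF-side usage sketch for the two probe helpers above (illustrative only;
 * PT_REGS_PARM1() and the variable names are assumptions of the example):
 *
 *	struct file *file = NULL;
 *
 *	bpf_probe_read(&file, sizeof(file), (void *)PT_REGS_PARM1(ctx));
 *
 * bpf_probe_write_user() is the mirror image for user addresses and is only
 * handed out together with the rate-limited warning above, since it can
 * corrupt user memory when misused.
 */
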
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			i++;
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i - 1] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}
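
	/*
	 * Worked examples of the check above (illustrative, not exhaustive):
	 *   "pid %d comm %s\n"  - accepted, fmt_cnt == 2, a single '%s'
	 *   "%s %s\n"           - rejected, only one '%s' per format string
	 *   "%llu\n"            - accepted, mod[0] == 2 keeps the full u64
	 */
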
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(1 /* Fake ip will not be printed. */,		\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
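
/*
 * BPF-side usage sketch (illustrative): the verifier requires fmt to live on
 * the program stack and be NUL terminated within fmt_size, so callers
 * typically write
 *
 *	char fmt[] = "ptr %p len %d\n";
 *
 *	bpf_trace_printk(fmt, sizeof(fmt), ptr, len);
 *
 * and read the output from the trace_pipe file in tracefs.
 */
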
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
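
/*
 * Usage sketch for the caveat above (illustrative; 'counters' is just an
 * example BPF_MAP_TYPE_PERF_EVENT_ARRAY map): a raw counter value in the
 * [-22..-2] range cannot be told apart from an error code, so callers that
 * care should prefer bpf_perf_event_read_value() below.
 *
 *	u64 cnt = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);
 *	if ((s64)cnt < 0)
 *		return 0;	/* may be an error, see above *​/
 */
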
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);

static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_raw_record *raw)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	perf_sample_data_init(sd, 0, 0);
	sd->raw = raw;
	perf_event_output(event, sd, regs);
	return 0;
}
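
/*
 * The flags argument used below encodes the target array index in
 * BPF_F_INDEX_MASK, with BPF_F_CURRENT_CPU selecting the current CPU.
 * BPF-side sketch (illustrative; 'events' and 'data' are example names):
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */
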
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};
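
/*
 * bpf_event_output() below serves program types whose context is not a
 * pt_regs (e.g. networking programs): it fabricates regs from the caller
 * via perf_fetch_caller_regs() and lets @ctx_copy supply a second raw
 * fragment for the payload.
 */
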
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};

	perf_fetch_caller_regs(regs);

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(in_interrupt()))
		return -EINVAL;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
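
/*
 * Usage sketch (illustrative; 'cgrp_map' stands for an example
 * BPF_MAP_TYPE_CGROUP_ARRAY populated from user space): skip events for
 * tasks outside the cgroup hierarchy held in slot 0.
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) != 1)
 *		return 0;
 */
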
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
};
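
/*
 * Usage sketch (illustrative; names are examples, and 'len' must still be
 * clamped to the buffer size to satisfy the verifier): the returned length
 * includes the trailing NUL, so only the bytes actually copied need to be
 * emitted.
 *
 *	int len = bpf_probe_read_str(buf, sizeof(buf), unsafe_ptr);
 *	if (len > 0)
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      buf, len);
 */
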
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
	default:
		return tracing_func_proto(func_id);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}
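
/*
 * Example of what the check above allows (illustrative; the x86-64 field
 * name is assumed for the example): an aligned, read-only load of a pt_regs
 * member such as
 *
 *	u64 ip = ctx->ip;
 *
 * while writes, misaligned offsets or loads past the end of pt_regs are
 * rejected at verification time.
 */
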
const struct bpf_verifier_ops kprobe_prog_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	default:
		return tracing_func_proto(func_id);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}
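
/*
 * Layout assumed by the check above (illustrative): the perf tracepoint
 * buffer starts with a hidden 'struct pt_regs *' (hence off >= sizeof(void *))
 * followed by the tracepoint fields that the program may read, up to
 * PERF_MAX_TRACE_SIZE.
 */
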
const struct bpf_verifier_ops tracepoint_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	const int size_sp = FIELD_SIZEOF(struct bpf_perf_event_data,
					 sample_period);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_sp);
		if (!bpf_ctx_narrow_access_ok(off, size, size_sp))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
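
/*
 * Worked example of the rewrite above (illustrative): a program load of
 * ctx->sample_period becomes, roughly,
 *
 *	dst = ((struct bpf_perf_event_data_kern *)ctx)->data;
 *	dst = ((struct perf_sample_data *)dst)->period;
 *
 * while any other offset is loaded through the saved pt_regs pointer instead.
 */
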
const struct bpf_verifier_ops perf_event_prog_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};