/* SPDX-License-Identifier: GPL-2.0 */

/*
 * BPF probe glue for tracepoints: redefines the tracepoint helper macros
 * so that including TRACE_INCLUDE_FILE generates, for each event class,
 * a handler that forwards tracepoint arguments to a BPF program.
 */
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_BPF_EVENTS

#undef __entry
#define __entry entry

/*
 * __data_loc_##field packs the dynamic-array location into one u32:
 * low 16 bits = byte offset from the start of the entry,
 * high 16 bits = length (see __get_dynamic_array_len below).
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

/*
 * The perf-specific count/task hints are meaningless for BPF probes;
 * expand them to the bare argument so event definitions still compile.
 */
#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

/*
 * cast any integer, pointer, or small struct to u64
 *
 * UINTTYPE(size) selects, at compile time, the unsigned integer type
 * whose width matches `size` (1/2/4/8 bytes).  Any other size picks the
 * (void) arm, which makes __CAST_TO_U64's declaration of __dst a build
 * error — oversized aggregates cannot be passed to a BPF program.
 */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1,  (u8)1, \
		   __builtin_choose_expr(size == 2, (u16)2, \
		   __builtin_choose_expr(size == 4, (u32)3, \
		   __builtin_choose_expr(size == 8, (u64)4, \
					 (void)5)))))
/*
 * Convert one tracepoint argument to u64.  The value is copied through
 * a same-sized unsigned integer with memcpy rather than a plain cast so
 * that pointers and small structs are handled too, without violating
 * strict aliasing.
 */
#define __CAST_TO_U64(x) ({ \
	typeof(x) __src = (x); \
	UINTTYPE(sizeof(x)) __dst; \
	memcpy(&__dst, &__src, sizeof(__dst)); \
	(u64)__dst; })

/*
 * __CASTn casts the first of n arguments and recurses on the rest, so
 * CAST_TO_U64(a, b, c) expands to a comma-separated list of u64 values.
 */
#define __CAST1(a,...) __CAST_TO_U64(a)
#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

/*
 * For each event class, emit the BPF entry point: __data carries the
 * attached bpf_prog, and the tracepoint arguments are forwarded as u64s
 * to the matching bpf_trace_runN() helper (N = number of args).
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
__bpf_trace_##call(void *__data, proto)					\
{									\
	struct bpf_prog *prog = __data;					\
	CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
}

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * bpf probe will fail to compile unless it too is updated.
 */
#define __DEFINE_EVENT(template, call, proto, args, size)		\
static inline void bpf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(__bpf_trace_##template);	\
}									\
typedef void (*btf_trace_##call)(void *__data, proto);			\
/*									\
 * Union overlays the raw-event map with a typed handler pointer so	\
 * the btf_trace_##call type reaches BTF while the map entry itself	\
 * lands in the __bpf_raw_tp_map section for lookup at attach time.	\
 */									\
static union {								\
	struct bpf_raw_event_map event;					\
	btf_trace_##call handler;					\
} __bpf_trace_tp_map_##call __used					\
__section("__bpf_raw_tp_map") = {					\
	.event = {							\
		.tp = &__tracepoint_##call,				\
		.bpf_func = __bpf_trace_##template,			\
		.num_args = COUNT_ARGS(args),				\
		.writable_size = size,					\
	},								\
};

/* Extract the first element of an argument list (used on proto/args). */
#define FIRST(x, ...) x

/*
 * Writable events additionally verify, at build time, that the declared
 * writable size equals the size of the object the first argument points
 * to — a mismatch would let a BPF program write out of bounds.
 */
#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size)	\
static inline void bpf_test_buffer_##call(void)				\
{									\
	/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
	 * BUILD_BUG_ON_ZERO() uses a different mechanism that is not	\
	 * dead-code-eliminated.					\
	 */								\
	FIRST(proto);							\
	(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args)));		\
}									\
__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)

/* Ordinary (non-writable) events have a writable_size of 0. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)

/* The print format is irrelevant to BPF; reuse the plain definition. */
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Re-read the trace header with the macros above to emit the BPF glue. */
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* Clean up the helpers that are private to this pass. */
#undef DEFINE_EVENT_WRITABLE
#undef __DEFINE_EVENT
#undef FIRST

#endif /* CONFIG_BPF_EVENTS */