/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

/*
 * A __data_loc_<field> word packs the field's offset from the start of
 * the entry in its low 16 bits and the field's length in its high 16 bits.
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)					\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)					\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __get_cpumask
#define __get_cpumask(field) (char *)__get_dynamic_array(field)

#undef __get_sockaddr
#define __get_sockaddr(field)	((struct sockaddr *)__get_dynamic_array(field))

/*
 * A __rel_loc_<field> word stores the offset relative to the end of the
 * __rel_loc field itself (low 16 bits) and the field's length (high 16 bits).
 */
#undef __get_rel_dynamic_array
#define __get_rel_dynamic_array(field)					\
		((void *)__entry +					\
		 offsetof(typeof(*__entry), __rel_loc_##field) +	\
		 sizeof(__entry->__rel_loc_##field) +			\
		 (__entry->__rel_loc_##field & 0xffff))

#undef __get_rel_dynamic_array_len
#define __get_rel_dynamic_array_len(field)				\
		((__entry->__rel_loc_##field >> 16) & 0xffff)

#undef __get_rel_str
#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))

#undef __get_rel_bitmask
#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)

#undef __get_rel_cpumask
#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)

#undef __get_rel_sockaddr
#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))

/*
 * Used inside an event's assign block to override the default sample
 * count and target task handed to perf.
 */
#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!bpf_prog_array_valid(event_call) &&			\
	    __builtin_constant_p(!__task) && !__task &&			\
	    hlist_empty(head))						\
		return;							\
									\
	/* Size the record so that it plus perf's u32 size header stays u64-aligned. */ \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	perf_fetch_caller_regs(__regs);					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}

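/*
 * Illustration only (hypothetical field name and values, not part of the
 * generated code): given a dynamic field "msg" whose __data_loc_msg word
 * is 0x00050018, the accessor macros above yield
 *
 *	__get_dynamic_array(msg)     == (void *)entry + 0x18
 *	__get_dynamic_array_len(msg) == 5
 *
 * i.e. the payload lives 0x18 bytes into the entry and is 5 bytes long.
 */
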
/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that, if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */
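
/*
 * Usage sketch (hypothetical event name, for illustration only): a trace
 * header defining an event such as
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int value),
 *		TP_ARGS(value),
 *		TP_STRUCT__entry(
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__entry->value = value;
 *		),
 *		TP_printk("value=%d", __entry->value)
 *	);
 *
 * is re-read by this stage through TRACE_INCLUDE(TRACE_INCLUDE_FILE) with
 * the macros redefined as above, so the build emits a static
 * perf_trace_sample_event() handler that perf can attach to the
 * tracepoint when CONFIG_PERF_EVENTS is enabled.
 */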