#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;

struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	unsigned int		initial_delay;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both directions) */

#ifdef HAVE_BPF_SKEL

int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else /* !HAVE_BPF_SKEL */

/* Stubs used when perf is built without BPF skeleton support. */

static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif /* HAVE_BPF_SKEL */

#endif /* __PERF_FTRACE_H__ */
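
/*
 * Illustrative sketch (not part of the original header): the latency
 * helpers declared above are expected to be driven as a
 * prepare/start/stop/read/cleanup sequence, with results read back into
 * a NUM_BUCKET-sized histogram.  The driver code below is a hypothetical
 * example only; the "ftrace" and "buckets" variables are made up for
 * illustration, and the real caller lives elsewhere in the perf sources
 * (e.g. builtin-ftrace.c).
 *
 *	struct perf_ftrace ftrace = { 0 };
 *	int buckets[NUM_BUCKET] = { 0 };
 *
 *	if (perf_ftrace__latency_prepare_bpf(&ftrace) < 0)
 *		return -1;
 *	perf_ftrace__latency_start_bpf(&ftrace);
 *	... run the workload being measured ...
 *	perf_ftrace__latency_stop_bpf(&ftrace);
 *	perf_ftrace__latency_read_bpf(&ftrace, buckets);
 *	perf_ftrace__latency_cleanup_bpf(&ftrace);
 *
 * Without HAVE_BPF_SKEL every call above returns -1, so callers can
 * detect the missing BPF support and fall back or report an error.
 */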