// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_LOCK_CONTENTION_H
#define PERF_LOCK_CONTENTION_H

#include <linux/list.h>
#include <linux/rbtree.h>

/*
 * struct lock_stat:
 * Accumulated statistics for one lock instance, identified by address.
 * Entries live in a hash table (hash_entry) and are linked into an
 * rbtree (rb) when sorted for report output.
 */
struct lock_stat {
	struct hlist_node hash_entry;
	struct rb_node rb; /* used for sorting */

	u64 addr; /* address of lockdep_map, used as ID */
	char *name; /* for strcpy(), we cannot use const */
	u64 *callstack; /* optional callstack of the contended lock -- NULL if not collected */

	/* event counts per lock event type */
	unsigned int nr_acquire;
	unsigned int nr_acquired;
	unsigned int nr_contended;
	unsigned int nr_release;

	/*
	 * The two members overlay the same storage: lock events track
	 * read-lock counts, while contention events reuse the slot for
	 * LCB_F_* flags -- a given entry uses only one interpretation.
	 */
	union {
		unsigned int nr_readlock;
		unsigned int flags;
	};
	unsigned int nr_trylock;

	/* these times are in nano sec. */
	u64 avg_wait_time;
	u64 wait_time_total;
	u64 wait_time_min;
	u64 wait_time_max;

	int broken; /* flag of blacklist */
	int combined; /* NOTE(review): appears to mark entries merged with another -- confirm at use sites */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting first event of acquire.
 * As the nature of lock events, there is no guarantee
 * that the first event for the locks are acquire,
 * it can be acquired, contended or released.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Place to put on state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head list; /* linked on thread_stat::seq_list */
	int state;             /* one of SEQ_STATE_* */
	u64 prev_event_time;   /* timestamp of the previous event in this sequence */
	u64 addr;              /* lock address this sequence belongs to */

	int read_count;        /* nesting depth of read-acquisitions */
};

/* Per-thread lock sequence tracking, keyed by tid in an rbtree. */
struct thread_stat {
	struct rb_node rb;

	u32 tid;
	struct list_head seq_list; /* list of struct lock_seq_stat */
};

/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH 8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP 3

/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)

struct evlist;
struct machine;
struct target;

/*
 * struct lock_contention:
 * Context passed to the BPF-based contention tracing entry points below.
 */
struct lock_contention {
	struct evlist *evlist;
	struct target *target;
	struct machine *machine;
	struct hlist_head *result;    /* output: hash table of struct lock_stat */
	unsigned long map_nr_entries; /* presumably sizes the BPF map -- confirm in skeleton code */
	int lost;                     /* count of entries that could not be recorded */
	int max_stack;                /* max stack trace depth to collect */
	int stack_skip;               /* entries to skip, cf. CONTENTION_STACK_SKIP */
};

#ifdef HAVE_BPF_SKEL

/*
 * BPF-backed implementation: prepare loads/attaches the skeleton,
 * start/stop toggle collection, read copies results into con->result,
 * finish tears everything down.  All return 0 on success.
 */
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(void);

#else  /* !HAVE_BPF_SKEL */

/* No-op stubs so callers build unchanged without BPF skeleton support. */
static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
	return 0;
}

static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
static inline int lock_contention_finish(void) { return 0; }

static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
	return 0;
}

#endif  /* HAVE_BPF_SKEL */

/* Returns true if addr falls inside a known locking-implementation symbol. */
bool is_lock_function(struct machine *machine, u64 addr);

#endif  /* PERF_LOCK_CONTENTION_H */