// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_LOCK_CONTENTION_H
#define PERF_LOCK_CONTENTION_H

#include <linux/list.h>
#include <linux/rbtree.h>

struct lock_stat {
	struct hlist_node	hash_entry;
	struct rb_node		rb;		/* used for sorting */

	u64			addr;		/* address of lockdep_map, used as ID */
	char			*name;		/* for strcpy(), we cannot use const */

	unsigned int		nr_acquire;
	unsigned int		nr_acquired;
	unsigned int		nr_contended;
	unsigned int		nr_release;

	union {
		unsigned int	nr_readlock;
		unsigned int	flags;
	};
	unsigned int		nr_trylock;

	/* these times are in nano sec. */
	u64                     avg_wait_time;
	u64			wait_time_total;
	u64			wait_time_min;
	u64			wait_time_max;

	int			broken; /* flag of blacklist */
	int			combined;
};
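
/*
 * Example (illustrative sketch, not part of perf): how one measured wait
 * time could be folded into a struct lock_stat.  The helper name is
 * hypothetical and the min handling assumes wait_time_min starts at 0.
 *
 *	static void account_wait_time(struct lock_stat *ls, u64 wait)
 *	{
 *		ls->wait_time_total += wait;
 *		if (wait > ls->wait_time_max)
 *			ls->wait_time_max = wait;
 *		if (wait < ls->wait_time_min || ls->wait_time_min == 0)
 *			ls->wait_time_min = wait;
 *		if (ls->nr_contended)
 *			ls->avg_wait_time = ls->wait_time_total / ls->nr_contended;
 *	}
 */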

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required to detect whether the first event seen for a
 * lock is an acquire.  Given the nature of lock events, there is no
 * guarantee that the first event for a lock is acquire; it can be
 * acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
#define SEQ_STATE_RELEASED	1
#define SEQ_STATE_ACQUIRING	2
#define SEQ_STATE_ACQUIRED	3
#define SEQ_STATE_READ_ACQUIRED	4
#define SEQ_STATE_CONTENDED	5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Place to store the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
	struct list_head        list;
	int			state;
	u64			prev_event_time;
	u64                     addr;

	int                     read_count;
};
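
/*
 * Example (illustrative sketch): advancing one sequence through the
 * SEQ_STATE_* values when an "acquired" event arrives.  The function
 * name and the timestamp handling are hypothetical.
 *
 *	static void seq_on_acquired(struct lock_seq_stat *seq, u64 ts)
 *	{
 *		switch (seq->state) {
 *		case SEQ_STATE_ACQUIRING:
 *			seq->state = SEQ_STATE_ACQUIRED;	// pattern 1: no contention
 *			break;
 *		case SEQ_STATE_CONTENDED:
 *			// pattern 2: ts - seq->prev_event_time is the wait time
 *			seq->state = SEQ_STATE_ACQUIRED;
 *			break;
 *		default:
 *			// e.g. SEQ_STATE_UNINITIALIZED: acquired was the first
 *			// event seen for this lock, nothing to account
 *			break;
 *		}
 *		seq->prev_event_time = ts;
 *	}
 */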

struct thread_stat {
	struct rb_node		rb;

	u32                     tid;
	struct list_head        seq_list;
};

/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH  8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP  3
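
/*
 * Example (illustrative sketch): picking the interesting caller out of a
 * callstack captured CONTENTION_STACK_DEPTH entries deep.  The first
 * CONTENTION_STACK_SKIP entries belong to the locking code and are
 * skipped; any remaining lock internals are filtered out with
 * is_lock_function() (declared at the end of this header).  The helper
 * name is hypothetical.
 *
 *	static u64 find_caller_addr(struct machine *machine, u64 *callstack, int size)
 *	{
 *		int i;
 *
 *		for (i = CONTENTION_STACK_SKIP; i < size; i++) {
 *			if (callstack[i] && !is_lock_function(machine, callstack[i]))
 *				return callstack[i];
 *		}
 *		return 0;
 *	}
 */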

/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 */
#define LCB_F_SPIN	(1U << 0)
#define LCB_F_READ	(1U << 1)
#define LCB_F_WRITE	(1U << 2)
#define LCB_F_RT	(1U << 3)
#define LCB_F_PERCPU	(1U << 4)
#define LCB_F_MUTEX	(1U << 5)
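
/*
 * Example (illustrative sketch): how the LCB_F_* bits could be combined
 * into a rough lock-type string.  The exact mapping perf uses may differ;
 * this only shows that the bits are meant to be tested together (e.g.
 * LCB_F_SPIN | LCB_F_READ for the read side of an rwlock).
 *
 *	static const char *lock_type_str(unsigned int flags)
 *	{
 *		if (flags & LCB_F_MUTEX)
 *			return "mutex";
 *		if (flags & LCB_F_RT)
 *			return "rt-mutex";
 *		if (flags & LCB_F_PERCPU)
 *			return "pcpu-rwsem";
 *		if (flags & LCB_F_SPIN) {
 *			if (flags & LCB_F_READ)
 *				return "rwlock:R";
 *			if (flags & LCB_F_WRITE)
 *				return "rwlock:W";
 *			return "spinlock";
 *		}
 *		if (flags & LCB_F_READ)
 *			return "rwsem:R";
 *		if (flags & LCB_F_WRITE)
 *			return "rwsem:W";
 *		return "semaphore";
 *	}
 */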

struct evlist;
struct machine;
struct target;

struct lock_contention {
	struct evlist *evlist;
	struct target *target;
	struct machine *machine;
	struct hlist_head *result;
	unsigned long map_nr_entries;
	unsigned long lost;
};

#ifdef HAVE_BPF_SKEL

int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(void);

#else  /* !HAVE_BPF_SKEL */

static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
{
	return 0;
}

static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
static inline int lock_contention_finish(void) { return 0; }

static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
	return 0;
}

#endif  /* HAVE_BPF_SKEL */
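
/*
 * Example (illustrative sketch): the expected calling sequence when the
 * BPF skeleton is available.  Error handling, the perf session setup and
 * the names of the target and result table (lockhash here) are omitted
 * or hypothetical.
 *
 *	struct lock_contention con = {
 *		.target = &target,
 *		.machine = machine,
 *		.result = lockhash,
 *		.map_nr_entries = 10000,
 *	};
 *
 *	lock_contention_prepare(&con);	// load and attach the BPF programs
 *	lock_contention_start();	// enable data collection
 *	// ... run or wait for the workload ...
 *	lock_contention_stop();		// disable data collection
 *	lock_contention_read(&con);	// fill con.result with lock_stat entries
 *	lock_contention_finish();	// detach and unload the BPF programs
 */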

bool is_lock_function(struct machine *machine, u64 addr);

#endif  /* PERF_LOCK_CONTENTION_H */