#ifndef PERF_UTIL_KWORK_H
#define PERF_UTIL_KWORK_H

#include "util/tool.h"
#include "util/time-utils.h"

#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct perf_sample;
struct perf_session;

/*
 * Supported kwork classes: one per kind of kernel work that can be traced.
 */
enum kwork_class_type {
	KWORK_CLASS_IRQ,
	KWORK_CLASS_SOFTIRQ,
	KWORK_CLASS_WORKQUEUE,
	KWORK_CLASS_MAX,
};

/*
 * Report modes of the perf kwork subcommands.
 */
enum kwork_report_type {
	KWORK_REPORT_RUNTIME,
	KWORK_REPORT_LATENCY,
	KWORK_REPORT_TIMEHIST,
};

/*
 * Tracepoint stages of a single piece of work:
 * raise (raised/queued), entry (starts executing), exit (finishes).
 */
enum kwork_trace_type {
	KWORK_TRACE_RAISE,
	KWORK_TRACE_ENTRY,
	KWORK_TRACE_EXIT,
	KWORK_TRACE_MAX,
};

/*
 * data structure:
 *
 *                 +==================+ +============+ +======================+
 *                 |      class       | |    work    | |         atom         |
 *                 +==================+ +============+ +======================+
 * +------------+  |  +-----+         | |  +------+  | |  +-------+   +-----+ |
 * | perf_kwork | +-> | irq | --------|+-> | eth0 | --+-> | raise | - | ... | --+   +-----------+
 * +-----+------+ ||  +-----+         |||  +------+  |||  +-------+   +-----+ | |   |           |
 *       |        ||                  |||            |||                      | +-> | atom_page |
 *       |        ||                  |||            |||  +-------+   +-----+ |     |           |
 *       |  class_list                |||            |+-> | entry | - | ... | ----> |           |
 *       |        ||                  |||            |||  +-------+   +-----+ |     |           |
 *       |        ||                  |||            |||                      | +-> |           |
 *       |        ||                  |||            |||  +-------+   +-----+ | |   |           |
 *       |        ||                  |||            |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||                  |||            | |  +-------+   +-----+ |           |
 *       |        ||                  |||            | |                      |           |
 *       |        ||                  |||  +-----+   | |                      |           |
 *       |        ||                  |+-> | ... |   | |                      |           |
 *       |        ||                  | |  +-----+   | |                      |           |
 *       |        ||                  | |            | |                      |           |
 *       |        ||  +---------+     | |  +-----+   | |  +-------+   +-----+ |           |
 *       |        +-> | softirq | -------> | RCU | ---+-> | raise | - | ... | --+   +-----+-----+
 *       |        ||  +---------+     | |  +-----+   |||  +-------+   +-----+ | |   |           |
 *       |        ||                  | |            |||                      | +-> | atom_page |
 *       |        ||                  | |            |||  +-------+   +-----+ |     |           |
 *       |        ||                  | |            |+-> | entry | - | ... | ----> |           |
 *       |        ||                  | |            |||  +-------+   +-----+ |     |           |
 *       |        ||                  | |            |||                      | +-> |           |
 *       |        ||                  | |            |||  +-------+   +-----+ | |   |           |
 *       |        ||                  | |            |+-> | exit  | - | ... | --+   +-----+-----+
 *       |        ||                  | |            | |  +-------+   +-----+ |           |
 *       |        ||                  | |            | |                      |           |
 *       |        ||  +-----------+   | |  +-----+   | |                      |           |
 *       |        +-> | workqueue | -----> | ... |   | |                      |           |
 *       |         |  +-----------+   | |  +-----+   | |                      |           |
 *       |         +==================+ +============+ +======================+           |
 *       |                                                                                |
 *       +---->  atom_page_list  ---------------------------------------------------------+
 *
 */

struct kwork_atom {
	struct list_head list;
	u64 time;			/* timestamp of the traced event */
	struct kwork_atom *prev;	/* atom of the previous stage (e.g. raise before entry) */

	void *page_addr;		/* kwork_atom_page this atom lives in */
	unsigned long bit_inpage;	/* slot index inside that page's bitmap */
};

#define NR_ATOM_PER_PAGE 128
struct kwork_atom_page {
	struct list_head list;
	struct kwork_atom atoms[NR_ATOM_PER_PAGE];
	DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);	/* set bit == atom slot in use */
};
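
/*
 * Example (illustrative sketch only, not the allocator in builtin-kwork.c):
 * carving a free atom slot out of a kwork_atom_page with the page bitmap.
 * Assumes the usual bitmap helpers (find_first_zero_bit()/__set_bit())
 * pulled in through <linux/bitmap.h>.
 *
 *	static struct kwork_atom *atom_from_page(struct kwork_atom_page *page)
 *	{
 *		unsigned long i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 *
 *		if (i >= NR_ATOM_PER_PAGE)
 *			return NULL;			// page is full
 *
 *		__set_bit(i, page->bitmap);		// mark the slot as used
 *		page->atoms[i].page_addr = page;	// remember the owning page
 *		page->atoms[i].bit_inpage = i;		// ... and the slot index
 *		return &page->atoms[i];
 *	}
 */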

struct kwork_class;
struct kwork_work {
	/*
	 * class field
	 */
	struct rb_node node;
	struct kwork_class *class;

	/*
	 * work field
	 */
	u64 id;
	int cpu;
	char *name;

	/*
	 * atom field
	 */
	u64 nr_atoms;
	struct list_head atom_list[KWORK_TRACE_MAX];	/* one pending list per kwork_trace_type */

	/*
	 * runtime report
	 */
	u64 max_runtime;
	u64 max_runtime_start;
	u64 max_runtime_end;
	u64 total_runtime;

	/*
	 * latency report
	 */
	u64 max_latency;
	u64 max_latency_start;
	u64 max_latency_end;
	u64 total_latency;
};
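
/*
 * Example (illustrative sketch, hedged): how a work could be keyed inside a
 * class's rbtree.  The in-tree code builds its compare list dynamically from
 * the sort order; this fixed cpu+id comparison only shows what uniquely
 * identifies a kwork_work.
 *
 *	static int kwork_work_cmp(struct kwork_work *l, struct kwork_work *r)
 *	{
 *		if (l->cpu != r->cpu)
 *			return l->cpu < r->cpu ? -1 : 1;
 *		if (l->id != r->id)
 *			return l->id < r->id ? -1 : 1;
 *		return 0;
 *	}
 */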

struct kwork_class {
	struct list_head list;
	const char *name;
	enum kwork_class_type type;

	unsigned int nr_tracepoints;
	const struct evsel_str_handler *tp_handlers;

	struct rb_root_cached work_root;	/* all works of this class, see diagram above */

	/* hook up the class's tracepoint handlers on the session */
	int (*class_init)(struct kwork_class *class,
			  struct perf_session *session);

	/* fill in a kwork_work from a tracepoint sample */
	void (*work_init)(struct kwork_class *class,
			  struct kwork_work *work,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine);

	/* format a printable name for the work into buf */
	void (*work_name)(struct kwork_work *work,
			  char *buf, int len);
};
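
/*
 * Example (illustrative sketch, hedged): roughly how a class instance could
 * be wired up and registered.  Every "example_*" name is hypothetical; the
 * real irq/softirq/workqueue classes are defined in builtin-kwork.c, and
 * struct evsel_str_handler is assumed to come from "util/session.h".
 *
 *	static const struct evsel_str_handler example_tp_handlers[] = {
 *		{ "irq:irq_handler_entry", example_handle_entry },
 *		{ "irq:irq_handler_exit",  example_handle_exit  },
 *	};
 *
 *	static struct kwork_class example_class = {
 *		.name           = "example",
 *		.type           = KWORK_CLASS_IRQ,
 *		.nr_tracepoints = ARRAY_SIZE(example_tp_handlers),
 *		.tp_handlers    = example_tp_handlers,
 *		.class_init     = example_class_init,
 *		.work_init      = example_work_init,
 *		.work_name      = example_work_name,
 *	};
 *
 *	// later: list_add_tail(&example_class.list, &kwork->class_list);
 */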

struct perf_kwork;
/*
 * One set of raise/entry/exit callbacks per report mode;
 * perf_kwork->tp_handler points at the set matching the selected
 * kwork_report_type.
 */
struct trace_kwork_handler {
	int (*raise_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*entry_event)(struct perf_kwork *kwork,
			   struct kwork_class *class, struct evsel *evsel,
			   struct perf_sample *sample, struct machine *machine);

	int (*exit_event)(struct perf_kwork *kwork,
			  struct kwork_class *class, struct evsel *evsel,
			  struct perf_sample *sample, struct machine *machine);
};
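
/*
 * Example (illustrative sketch, hedged): how a per-class tracepoint callback
 * might dispatch into the active handler set.  The "example_*" name is
 * hypothetical; the real dispatch lives in builtin-kwork.c.
 *
 *	static int example_irq_handler_entry(struct perf_kwork *kwork,
 *					     struct kwork_class *class,
 *					     struct evsel *evsel,
 *					     struct perf_sample *sample,
 *					     struct machine *machine)
 *	{
 *		if (kwork->tp_handler->entry_event)
 *			return kwork->tp_handler->entry_event(kwork, class, evsel,
 *							      sample, machine);
 *		return 0;
 *	}
 */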

struct perf_kwork {
	/*
	 * metadata
	 */
	struct perf_tool tool;
	struct list_head class_list;		/* registered kwork_class instances */
	struct list_head atom_page_list;	/* all allocated kwork_atom_pages */
	struct list_head sort_list, cmp_id;
	struct rb_root_cached sorted_work_root;
	const struct trace_kwork_handler *tp_handler;	/* callbacks for the selected report mode */

	/*
	 * profile filters
	 */
	const char *profile_name;

	const char *cpu_list;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

	const char *time_str;
	struct perf_time_interval ptime;

	/*
	 * options for command
	 */
	bool force;
	const char *event_list_str;
	enum kwork_report_type report;

	/*
	 * options for subcommand
	 */
	bool summary;
	const char *sort_order;
	bool show_callchain;
	unsigned int max_stack;
	bool use_bpf;

	/*
	 * statistics
	 */
	u64 timestart;
	u64 timeend;

	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;

	u64 all_runtime;
	u64 all_count;
	u64 nr_skipped_events[KWORK_TRACE_MAX + 1];	/* per trace type, plus one extra slot */
};
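
/*
 * Example (illustrative sketch, hedged): walking the data structure from the
 * diagram above -- every class on class_list, every work in the class's
 * rbtree, and the pending atoms of one trace type.  The helper name is
 * hypothetical.
 *
 *	static void example_walk(struct perf_kwork *kwork)
 *	{
 *		struct kwork_class *class;
 *		struct rb_node *node;
 *
 *		list_for_each_entry(class, &kwork->class_list, list) {
 *			for (node = rb_first_cached(&class->work_root);
 *			     node != NULL; node = rb_next(node)) {
 *				struct kwork_work *work =
 *					rb_entry(node, struct kwork_work, node);
 *				struct kwork_atom *atom;
 *
 *				list_for_each_entry(atom,
 *						    &work->atom_list[KWORK_TRACE_ENTRY],
 *						    list) {
 *					// e.g. account atom->time here
 *				}
 *			}
 *		}
 *	}
 */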

/*
 * Find the work matching "key" in the class's work_root, creating and
 * inserting it first if it does not exist yet.
 */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key);

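/*
 * Example (illustrative sketch, hedged): a handler typically fills a
 * throwaway "key" work on the stack via class->work_init() and then calls
 * perf_kwork_add_work() to get (or create) the tracked instance.  The
 * "example_*" name is hypothetical.
 *
 *	static struct kwork_work *example_find_work(struct perf_kwork *kwork,
 *						    struct kwork_class *class,
 *						    struct evsel *evsel,
 *						    struct perf_sample *sample,
 *						    struct machine *machine)
 *	{
 *		struct kwork_work key = { .class = class };
 *
 *		class->work_init(class, &key, evsel, sample, machine);
 *		return perf_kwork_add_work(kwork, class, &key);
 *	}
 */
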
#ifdef HAVE_BPF_SKEL

int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork);
int perf_kwork__report_read_bpf(struct perf_kwork *kwork);
void perf_kwork__report_cleanup_bpf(void);

void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);
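
/*
 * Example (illustrative sketch, hedged): a plausible call order for the BPF
 * path when kwork->use_bpf is set, based only on the prototypes above --
 * prepare, start, let the workload run, finish, then read out and clean up.
 *
 *	if (perf_kwork__trace_prepare_bpf(kwork))
 *		return -1;
 *	perf_kwork__trace_start();
 *	// ... workload runs, tracepoint data is collected in BPF ...
 *	perf_kwork__trace_finish();
 *	perf_kwork__report_read_bpf(kwork);
 *	// print the report from kwork->class_list here
 *	perf_kwork__report_cleanup_bpf();
 */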

#else  /* !HAVE_BPF_SKEL */

static inline int
perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline int
perf_kwork__report_read_bpf(struct perf_kwork *kwork __maybe_unused)
{
	return -1;
}

static inline void perf_kwork__report_cleanup_bpf(void) {}

static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}

#endif  /* HAVE_BPF_SKEL */

#endif  /* PERF_UTIL_KWORK_H */