#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct mmap2_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	u32 maj;
	u32 min;
	u64 ino;
	u64 ino_generation;
	u32 prot;
	u32 flags;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
	u32 tid, ptid;
	u64 time;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 time_enabled;
	u64 time_running;
	u64 id;
};

struct throttle_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 stream_id;
};

#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
	 PERF_SAMPLE_IDENTIFIER)

/* perf sample has a 16-bit size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
	struct perf_event_header        header;
	u64 array[];
};

struct regs_dump {
	u64 abi;
	u64 mask;
	u64 *regs;

	/* Cached values/mask filled by first register access. */
	u64 cache_regs[PERF_REGS_MAX];
	u64 cache_mask;
};

struct stack_dump {
	u16 offset;
	u64 size;
	char *data;
};

struct sample_read_value {
	u64 value;
	u64 id;
};

struct sample_read {
	u64 time_enabled;
	u64 time_running;
	union {
		struct {
			u64 nr;
			struct sample_read_value *values;
		} group;
		struct sample_read_value one;
	};
};

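/*
 * Illustrative sketch, not part of the original header: struct sample_read
 * is filled from PERF_SAMPLE_READ data, and PERF_FORMAT_GROUP in the event's
 * read_format selects the group layout over the single-value one.  The
 * helper name below is hypothetical.
 */
static inline u64 sample_read__first_value(const struct sample_read *read,
					   u64 read_format)
{
	if (read_format & PERF_FORMAT_GROUP)
		return read->group.nr ? read->group.values[0].value : 0;

	return read->one.value;
}
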
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 reserved:60;
};

struct branch_entry {
	u64			from;
	u64			to;
	struct branch_flags	flags;
};

struct branch_stack {
	u64			nr;
	struct branch_entry	entries[0];
};

enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
};

#define PERF_BRANCH_MASK		(\
	PERF_IP_FLAG_BRANCH		|\
	PERF_IP_FLAG_CALL		|\
	PERF_IP_FLAG_RETURN		|\
	PERF_IP_FLAG_CONDITIONAL	|\
	PERF_IP_FLAG_SYSCALLRET		|\
	PERF_IP_FLAG_ASYNC		|\
	PERF_IP_FLAG_INTERRUPT		|\
	PERF_IP_FLAG_TX_ABORT		|\
	PERF_IP_FLAG_TRACE_BEGIN	|\
	PERF_IP_FLAG_TRACE_END)

struct perf_sample {
	u64 ip;
	u32 pid, tid;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	u64 period;
	u64 weight;
	u64 transaction;
	u32 cpu;
	u32 raw_size;
	u64 data_src;
	u32 flags;
	u16 insn_len;
	void *raw_data;
	struct ip_callchain *callchain;
	struct branch_stack *branch_stack;
	struct regs_dump  user_regs;
	struct regs_dump  intr_regs;
	struct stack_dump user_stack;
	struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA) |\
	 PERF_MEM_S(LVL, NA) |\
	 PERF_MEM_S(SNOOP, NA) |\
	 PERF_MEM_S(LOCK, NA) |\
	 PERF_MEM_S(TLB, NA))

struct build_id_event {
	struct perf_event_header header;
	pid_t			 pid;
	u8			 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
	char			 filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START		= 64,
	PERF_RECORD_HEADER_ATTR			= 64,
	PERF_RECORD_HEADER_EVENT_TYPE		= 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA		= 66,
	PERF_RECORD_HEADER_BUILD_ID		= 67,
	PERF_RECORD_FINISHED_ROUND		= 68,
	PERF_RECORD_ID_INDEX			= 69,
	PERF_RECORD_HEADER_MAX
};

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency isn't enough to get
 * the total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
	u64 total_period;
	u64 total_non_filtered_period;
	u64 total_lost;
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_non_filtered_samples;
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
	u32 nr_unordered_events;
};

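/*
 * Illustrative sketch, not part of the original header: one way the counters
 * described above could be fed from incoming events.  The helper names are
 * hypothetical; the real accounting lives elsewhere in the perf tools.
 */
static inline void events_stats__account_lost(struct events_stats *stats,
					      const struct lost_event *lost)
{
	/* One PERF_RECORD_LOST "chunk"; .lost says how many events it covers. */
	stats->nr_events[PERF_RECORD_LOST]++;
	stats->total_lost += lost->lost;
}

static inline void events_stats__account_sample(struct events_stats *stats,
						const struct perf_sample *sample)
{
	/* With auto-freq the period varies per sample, so sum it explicitly. */
	stats->nr_events[PERF_RECORD_SAMPLE]++;
	stats->total_period += sample->period;
}
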
struct attr_event {
	struct perf_event_header header;
	struct perf_event_attr attr;
	u64 id[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	u64	event_id;
	char	name[MAX_EVENT_NAME];
};

struct event_type_event {
	struct perf_event_header header;
	struct perf_trace_event_type event_type;
};

struct tracing_data_event {
	struct perf_event_header header;
	u32 size;
};

struct id_index_entry {
	u64 id;
	u64 idx;
	u64 cpu;
	u64 tid;
};

struct id_index_event {
	struct perf_event_header header;
	u64 nr;
	struct id_index_entry entries[0];
};

union perf_event {
	struct perf_event_header	header;
	struct mmap_event		mmap;
	struct mmap2_event		mmap2;
	struct comm_event		comm;
	struct fork_event		fork;
	struct lost_event		lost;
	struct read_event		read;
	struct throttle_event		throttle;
	struct sample_event		sample;
	struct attr_event		attr;
	struct event_type_event		event_type;
	struct tracing_data_event	tracing_data;
	struct build_id_event		build_id;
	struct id_index_event		id_index;
};

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);

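/*
 * Illustrative sketch, not part of the original header: a minimal
 * perf_event__handler_t callback of the kind the synthesize helpers below
 * expect.  The function name is hypothetical.
 */
static inline int perf_event__example_handler(struct perf_tool *tool,
					      union perf_event *event,
					      struct perf_sample *sample,
					      struct machine *machine)
{
	(void)tool;
	(void)sample;
	(void)machine;

	/* A real handler would record or forward the event; here we only
	 * check that a well-formed header was delivered. */
	return event->header.size ? 0 : -1;
}
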
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine, bool mmap_data);
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine);

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process(struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine);

struct addr_location;

int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

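/*
 * Illustrative sketch, not part of the original header: event->header.type
 * selects which member of union perf_event is valid, so printing (like most
 * consumers) dispatches on it.  The function name is hypothetical.
 */
static inline size_t perf_event__example_fprintf(union perf_event *event, FILE *fp)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		return perf_event__fprintf_comm(event, fp);
	case PERF_RECORD_MMAP:
		return perf_event__fprintf_mmap(event, fp);
	case PERF_RECORD_MMAP2:
		return perf_event__fprintf_mmap2(event, fp);
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		return perf_event__fprintf_task(event, fp);
	default:
		return perf_event__fprintf(event, fp);
	}
}
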
u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name);

#endif /* __PERF_RECORD_H */