/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

#include "event.h"

union perf_event;
struct perf_session;
struct evlist;
struct perf_tool;
struct perf_mmap;
struct option;
struct record_opts;
struct perf_record_auxtrace_info;
struct events_stats;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			thread_stack;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
};

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
};
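
/*
 * A decoder (e.g. Intel PT) embeds this struct, points the callbacks at its
 * own handlers and attaches it to the session from its auxtrace_info handler.
 * Roughly (a sketch; the handler names are illustrative):
 *
 *	pt->auxtrace.process_event	    = intel_pt_process_event;
 *	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
 *	...
 *	session->auxtrace = &pt->auxtrace;
 */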

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is the top of the heap);
 *           expected to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int		heap_cnt;
	unsigned int		heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};
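
/*
 * Example bare filter strings (illustrative; "foo" and "/bin/foo" are
 * hypothetical names): "filter foo @ /bin/foo" traces only the function foo
 * in /bin/foo, while "stop some_kernel_symbol" stops tracing at a kernel
 * symbol (no @filename means the kernel).  Such strings are parsed by
 * addr_filters__parse_bare_filter(), declared below.
 */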

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only, which makes using
 * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
 * the buffer not to be updated while the snapshot is being made (e.g. Intel PT
 * disables the event), so there is no race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
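
/*
 * Minimal consumer sketch of the head/tail protocol above (the real reader is
 * auxtrace_mmap__read(), declared below and implemented in auxtrace.c):
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old  = mm->prev;
 *
 *	if (head != old) {
 *		... copy out data between old and head, wrapping on mm->mask ...
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head);
 *	}
 */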

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct perf_mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct perf_mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
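
/*
 * Decoders typically walk a queue's buffers like this (a sketch; fd is the
 * perf.data file descriptor):
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		... decode the data (use_data / use_size if set) ...
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */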

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);
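
/*
 * Usage sketch: each queue is kept on the heap keyed by the timestamp of its
 * next unprocessed data, so processing always takes the oldest data first:
 *
 *	err = auxtrace_heap__add(&heap, queue_nr, timestamp);
 *	...
 *	... process heap->heap_array[0].queue_nr up to some timestamp ...
 *	auxtrace_heap__pop(&heap);
 *	... then re-add the queue with its next timestamp, if it has more data
 */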

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);
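
/*
 * Typical record-side flow (a sketch; error handling abbreviated):
 *
 *	itr = auxtrace_record__init(evlist, &err);
 *	if (err)
 *		return err;
 *	err = auxtrace_record__options(itr, evlist, opts);
 */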

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process);
int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);

#define ITRACE_HELP \
"				i:	    		synthesize instructions events\n"		\
"				b:	    		synthesize branches events\n"		\
"				c:	    		synthesize branches events (calls only)\n"	\
"				r:	    		synthesize branches events (returns only)\n" \
"				x:	    		synthesize transactions events\n"		\
"				w:	    		synthesize ptwrite events\n"		\
"				p:	    		synthesize power events\n"			\
"				e:	    		synthesize error events\n"			\
"				d:	    		create a debug log\n"			\
"				g[len]:     		synthesize a call chain (use with i or x)\n" \
"				l[len]:     		synthesize last branch entries (use with i or x)\n" \
"				sNUMBER:    		skip initial number of events\n"		\
"				PERIOD[ns|us|ms|i|t]:   specify period to sample stream\n" \
"				concatenate multiple options. Default is ibxwpe or cewp\n"
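
/*
 * Example (illustrative): "--itrace=i100us" synthesizes an instructions event
 * every 100 microseconds, while a bare "--itrace" uses the default set of
 * options described above.
 */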

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline int
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
				     struct perf_tool *tool __maybe_unused,
				     struct perf_session *session __maybe_unused,
				     perf_event__handler_t process __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

#define perf_event__process_auxtrace_info		0
#define perf_event__process_auxtrace			0
#define perf_event__process_auxtrace_error		0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif