/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool			set;
	bool			default_no_sample;
	bool			inject;
	bool			instructions;
	bool			branches;
	bool			transactions;
	bool			ptwrites;
	bool			pwr_events;
	bool			other_events;
	bool			errors;
	bool			dont_decode;
	bool			log;
	bool			calls;
	bool			returns;
	bool			callchain;
	bool			thread_stack;
	bool			last_branch;
	unsigned int		callchain_sz;
	unsigned int		last_branch_sz;
	unsigned long long	period;
	enum itrace_period_type	period_type;
	unsigned long		initial_skip;
	unsigned long		*cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int			range_num;
};
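
/*
 * Example (illustrative sketch, not part of this API): a tool that wants the
 * default set of synthesized events plus a callchain on each 'instructions'
 * sample might set the options up roughly like this, using
 * itrace_synth_opts__set_default() declared further down.  The variable names
 * are hypothetical.
 *
 *	struct itrace_synth_opts opts = { .set = true };
 *
 *	itrace_synth_opts__set_default(&opts, false);
 *	opts.callchain = true;
 *	opts.callchain_sz = 16;
 */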

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64			file_offset;
	u64			sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head	list;
	size_t			nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
};
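
/*
 * Example (illustrative sketch): a decoder typically embeds struct auxtrace in
 * its own state, fills in the callbacks it needs and points the session at it.
 * "struct my_decoder" and the my_*() functions are hypothetical.
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		...
 *	};
 *
 *	decoder->auxtrace.process_event = my_process_event;
 *	decoder->auxtrace.process_auxtrace_event = my_process_auxtrace_event;
 *	decoder->auxtrace.flush_events = my_flush;
 *	decoder->auxtrace.free = my_free;
 *	session->auxtrace = &decoder->auxtrace;
 */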

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head	list;
	size_t			size;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	void			*data;
	off_t			data_offset;
	void			*mmap_addr;
	size_t			mmap_size;
	bool			data_needs_freeing;
	bool			consecutive;
	u64			offset;
	u64			reference;
	u64			buffer_nr;
	size_t			use_size;
	void			*use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head	head;
	pid_t			tid;
	int			cpu;
	bool			set;
	void			*priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue	*queue_array;
	unsigned int		nr_queues;
	bool			new_data;
	bool			populated;
	u64			next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int		queue_nr;
	u64			ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item	*heap_array;
	unsigned int			heap_cnt;
	unsigned int			heap_sz;
};
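
/*
 * Example (illustrative sketch): decoders keep queues ordered by pushing each
 * queue's next timestamp onto the heap and always servicing the lowest one,
 * using auxtrace_heap__add()/auxtrace_heap__pop() declared further down.
 * decode_one_buffer() and next_timestamp() are hypothetical helpers.
 *
 *	auxtrace_heap__add(&heap, queue_nr, next_timestamp(queue_nr));
 *	...
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		decode_one_buffer(queue_nr);
 *		auxtrace_heap__add(&heap, queue_nr, next_timestamp(queue_nr));
 *	}
 */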

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void		*base;
	void		*userpg;
	size_t		mask;
	size_t		len;
	u64		prev;
	int		idx;
	pid_t		tid;
	int		cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t		mask;
	off_t		offset;
	size_t		len;
	int		prot;
	int		idx;
	pid_t		tid;
	int		cpu;
};
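
/*
 * Example (illustrative sketch): the record code sets up an AUX area mmap by
 * initialising the parameters, binding them to an mmap index and then mapping,
 * roughly in this order (the function declarations are further down in this
 * header; "mp", "mm", "evlist", "idx" and "fd" are hypothetical locals).
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		return -1;
 */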

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head	list;
	bool			range;
	bool			start;
	const char		*action;
	const char		*sym_from;
	const char		*sym_to;
	int			sym_from_idx;
	int			sym_to_idx;
	u64			addr;
	u64			size;
	const char		*filename;
	char			*str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head	head;
	int			cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
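
/*
 * Example (illustrative sketch): a reader pairs the head read with a tail
 * update once the data has been consumed, so the kernel can reuse the buffer
 * space.  copy_out() is a hypothetical helper.
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		copy_out(mm->base, mm->mask, old, head);
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head);
 *	}
 */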

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
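
/*
 * Example (illustrative sketch): walking a queue's buffers during decode;
 * passing NULL to auxtrace_buffer__next() starts from the first buffer.
 * decode() and "fd" are hypothetical.
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		decode(data, buffer->size);
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */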

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);
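
/*
 * Example (illustrative sketch): parsing a bare filter specification into a
 * list of struct addr_filter; the filter string shown is hypothetical and the
 * error handling is elided.
 *
 *	struct addr_filters filts;
 *	struct addr_filter *filt;
 *
 *	addr_filters__init(&filts);
 *	if (addr_filters__parse_bare_filter(&filts, "filter main @ /usr/bin/ls"))
 *		return -EINVAL;
 *	list_for_each_entry(filt, &filts.head, list)
 *		pr_debug("%s %s\n", filt->action, filt->sym_from);
 *	addr_filters__exit(&filts);
 */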

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);

#define ITRACE_HELP \
"		i:			synthesize instructions events\n" \
"		b:			synthesize branches events\n" \
"		c:			synthesize branches events (calls only)\n" \
"		r:			synthesize branches events (returns only)\n" \
"		x:			synthesize transactions events\n" \
"		w:			synthesize ptwrite events\n" \
"		p:			synthesize power events\n" \
"		e:			synthesize error events\n" \
"		d:			create a debug log\n" \
"		g[len]:			synthesize a call chain (use with i or x)\n" \
"		l[len]:			synthesize last branch entries (use with i or x)\n" \
"		sNUMBER:		skip initial number of events\n" \
"		PERIOD[ns|us|ms|i|t]:	specify period to sample stream\n" \
"		concatenate multiple options. Default is ibxwpe or cewp\n"
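
/*
 * Example (illustrative sketch): tools typically expose the --itrace option by
 * wiring itrace_parse_synth_opts() into their option table and using
 * ITRACE_HELP for the help text ("itrace_synth_opts" here is a hypothetical
 * global of type struct itrace_synth_opts).
 *
 *	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
 *			    "Instruction Tracing options\n" ITRACE_HELP,
 *			    itrace_parse_synth_opts),
 */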

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

#define perf_event__process_auxtrace_info	0
#define perf_event__process_auxtrace		0
#define perf_event__process_auxtrace_error	0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif