/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/cpumap.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))
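
/*
 * Illustrative sketch, not part of the API: the error/log flag macros above
 * encode the single-letter sub-options of the --itrace 'e' and 'd' flags as
 * bit positions, so option parsing can set them directly in the plus/minus
 * masks of struct itrace_synth_opts defined below. For example, assuming the
 * parser has just seen the error flag "+o":
 *
 *	unsigned int c = 'o';
 *	synth_opts->error_plus_flags |= 1 << (c - 'a');
 *	// (error_plus_flags & AUXTRACE_ERR_FLG_OVERFLOW) is now set
 */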

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @intr_events: whether to synthesize interrupt events
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @add_last_branch: add branch context to existing event records
 * @approx_ipc: approximate IPC
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool intr_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	bool add_last_branch;
	bool approx_ipc;
	bool flc;
	bool llc;
	bool tlb;
	bool remote_access;
	bool mem;
	bool timeless_decoding;
	bool vm_time_correlation;
	bool vm_tm_corr_dry_run;
	char *vm_tm_corr_args;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
	unsigned int error_plus_flags;
	unsigned int error_minus_flags;
	unsigned int log_plus_flags;
	unsigned int log_minus_flags;
	unsigned int quick;
};

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
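
/*
 * Illustrative sketch, an assumption about typical usage rather than an API
 * defined here: the index is a list of fixed-size entry blocks, so walking
 * every indexed event means iterating the list and then each block's used
 * entries:
 *
 *	struct auxtrace_index *auxtrace_index;
 *	size_t i;
 *
 *	list_for_each_entry(auxtrace_index, head, list) {
 *		for (i = 0; i < auxtrace_index->nr; i++) {
 *			struct auxtrace_index_entry *ent =
 *					&auxtrace_index->entries[i];
 *			// ent->file_offset / ent->sz locate one
 *			// PERF_RECORD_AUXTRACE event in the perf.data file
 *		}
 *	}
 */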

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether the given evsel is an AUX area event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	struct perf_cpu cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};
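
/*
 * Illustrative sketch, an assumption about typical decoder usage rather than
 * an API defined here: the buffers on a queue are normally walked in order
 * with auxtrace_buffer__next() (declared below), loading and releasing the
 * data as it is consumed:
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		void *data = auxtrace_buffer__get_data(buffer, fd);
 *
 *		if (!data)
 *			break;
 *		// decode buffer->size bytes at 'data' ...
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */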

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @mmap_needed: set to %false for non-auxtrace events. This is needed because
 *               auxtrace mmapping is done in the same code path as non-auxtrace
 *               mmapping but not every evsel that needs non-auxtrace mmapping
 *               also needs auxtrace mmapping.
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	bool mmap_needed;
	struct perf_cpu cpu;
};
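
/*
 * Illustrative sketch, an assumption about how the mmap helpers declared
 * below fit together rather than a definition from this header; the names
 * auxtrace_offset, auxtrace_pages, auxtrace_overwrite, evlist, evsel, idx,
 * userpg and fd stand in for the caller's context:
 *
 *	struct auxtrace_mmap_params mp;
 *	struct auxtrace_mmap mm;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, evsel, idx);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		// handle the error
 */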

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};
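
/*
 * Illustrative sketch, an assumption about the bare filter syntax rather than
 * a definition from this header: a filter string such as
 *
 *	"filter main @ /usr/bin/ls"
 *
 * passed to addr_filters__parse_bare_filter() (declared below) would be
 * expected to yield roughly:
 *
 *	filt->action   = "filter";	// range filter that starts tracing
 *	filt->start    = true;
 *	filt->range    = true;
 *	filt->sym_from = "main";	// resolved to filt->addr / filt->size later
 *	filt->filename = "/usr/bin/ls";
 */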

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
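
/*
 * Illustrative sketch, an assumption about the usual consumer pattern; the
 * real copy logic lives in auxtrace_mmap__read() declared below. The head is
 * read before the data is touched and the tail is written only after the data
 * has been consumed, matching the barriers above:
 *
 *	u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		// copy the bytes between 'old' and 'head', wrapping at
 *		// mm->len (mm->mask is len - 1 when len is a power of two)
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *	}
 */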

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
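
/*
 * Illustrative sketch, an assumption about the intended usage rather than a
 * definition from this header; 'struct my_entry', 'payload', 'key', 'bits'
 * and 'limit_percent' are hypothetical. A decoder embeds
 * struct auxtrace_cache_entry at the start of its own entry type, allocates
 * entries from the cache and looks them up by key:
 *
 *	struct my_entry {			// hypothetical decoder type
 *		struct auxtrace_cache_entry entry;
 *		u64 payload;
 *	};
 *
 *	struct auxtrace_cache *c;
 *	struct my_entry *e;
 *
 *	c = auxtrace_cache__new(bits, sizeof(struct my_entry), limit_percent);
 *	e = auxtrace_cache__alloc_entry(c);
 *	if (e) {
 *		e->payload = 42;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	e = auxtrace_cache__lookup(c, key);
 */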

struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
"		i[period]:		synthesize instructions events\n" \
"		b:			synthesize branches events (branch misses for Arm SPE)\n" \
"		c:			synthesize branches events (calls only)\n" \
"		r:			synthesize branches events (returns only)\n" \
"		x:			synthesize transactions events\n" \
"		w:			synthesize ptwrite events\n" \
"		p:			synthesize power events\n" \
"		o:			synthesize other events recorded due to the use\n" \
"					of aux-output (refer to perf record)\n" \
"		I:			synthesize interrupt or similar (asynchronous) events\n" \
"					(e.g. Intel PT Event Trace)\n" \
"		e[flags]:		synthesize error events\n" \
"					each flag must be preceded by + or -\n" \
"					error flags are: o (overflow)\n" \
"							 l (data lost)\n" \
"		d[flags]:		create a debug log\n" \
"					each flag must be preceded by + or -\n" \
"					log flags are: a (all perf events)\n" \
"						       o (output to stdout)\n" \
"		f:			synthesize first level cache events\n" \
"		m:			synthesize last level cache events\n" \
"		t:			synthesize TLB events\n" \
"		a:			synthesize remote access events\n" \
"		g[len]:			synthesize a call chain (use with i or x)\n" \
"		G[len]:			synthesize a call chain on existing event records\n" \
"		l[len]:			synthesize last branch entries (use with i or x)\n" \
"		L[len]:			synthesize last branch entries on existing event records\n" \
"		sNUMBER:		skip initial number of events\n" \
"		q:			quicker (less detailed) decoding\n" \
"		A:			approximate IPC\n" \
"		Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
"		PERIOD[ns|us|ms|i|t]:	specify period to sample stream\n" \
"					concatenate multiple options. Default is ibxwpe or cewp\n"
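
/*
 * Illustrative sketch, an assumption about typical option strings; the
 * authoritative parser is itrace_do_parse_synth_opts() declared above. Using
 * the option letters documented in ITRACE_HELP, a string such as "i100ns"
 * requests 'instructions' events with a 100 nanosecond period, i.e. roughly:
 *
 *	synth_opts->set = true;
 *	synth_opts->instructions = true;
 *	synth_opts->period = 100;
 *	synth_opts->period_type = PERF_ITRACE_PERIOD_NANOSECS;
 *
 * and appending "g16" would additionally set synth_opts->callchain with
 * synth_opts->callchain_sz = 16.
 */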

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif