/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <internal/cpumap.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
#define AUXTRACE_LOG_FLG_USE_STDOUT	(1 << ('o' - 'a'))

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @intr_events: whether to synthesize interrupt events
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @add_last_branch: add branch context to existing event records
 * @approx_ipc: approximate IPC
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps
 * @vm_time_correlation: perform VM Time Correlation
 * @vm_tm_corr_dry_run: VM Time Correlation dry-run
 * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool intr_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	bool add_last_branch;
	bool approx_ipc;
	bool flc;
	bool llc;
	bool tlb;
	bool remote_access;
	bool mem;
	bool timeless_decoding;
	bool vm_time_correlation;
	bool vm_tm_corr_dry_run;
	char *vm_tm_corr_args;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
	unsigned int error_plus_flags;
	unsigned int error_minus_flags;
	unsigned int log_plus_flags;
	unsigned int log_minus_flags;
	unsigned int quick;
};
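
/*
 * Example (illustrative sketch, not part of the upstream sources): a tool
 * accepting an --itrace style string could fill in struct itrace_synth_opts
 * with itrace_do_parse_synth_opts() (declared below), falling back to
 * itrace_synth_opts__set_default() when no options were given. The option
 * string "i100i" is only an example value.
 *
 *	struct itrace_synth_opts opts = { .set = false };
 *
 *	// Synthesize 'instructions' events every 100 instructions
 *	if (itrace_do_parse_synth_opts(&opts, "i100i", 0))
 *		return -EINVAL;
 *
 *	if (!opts.set)
 *		itrace_synth_opts__set_default(&opts, false);
 */
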
/**
 * struct auxtrace_index_entry - indexes a AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether @evsel is an AUX area tracing event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};

/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	struct perf_cpu cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};
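
/*
 * Example (illustrative sketch, not taken verbatim from a decoder): walking
 * the buffers of one queue. auxtrace_buffer__next() (declared below) returns
 * the buffer following @buffer, or the first buffer when @buffer is NULL.
 * 'fd' is assumed to be a file descriptor for the perf.data file so that
 * data can be loaded on demand.
 *
 *	struct auxtrace_buffer *buffer = NULL;
 *	void *data;
 *
 *	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
 *		data = auxtrace_buffer__get_data(buffer, fd);
 *		if (!data)
 *			return -ENOMEM;
 *		// ... decode buffer->size bytes starting at data ...
 *		auxtrace_buffer__put_data(buffer);
 *	}
 */
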
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};

/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @mmap_needed: set to %false for non-auxtrace events. This is needed because
 *               auxtrace mmapping is done in the same code path as non-auxtrace
 *               mmapping but not every evsel that needs non-auxtrace mmapping
 *               also needs auxtrace mmapping.
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	bool mmap_needed;
	struct perf_cpu cpu;
};
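
/*
 * Example (illustrative sketch): merging queues in timestamp order with
 * struct auxtrace_heap above. Each queue pushes the timestamp of its next
 * item; heap_array[0] is then the queue with the lowest ordinal. The
 * first_timestamp() and process_queue() helpers are hypothetical.
 *
 *	struct auxtrace_heap heap = { .heap_array = NULL };
 *	unsigned int i;
 *	int ret;
 *
 *	// Seed the heap with one timestamp per non-empty queue
 *	for (i = 0; i < queues->nr_queues; i++) {
 *		ret = auxtrace_heap__add(&heap, i, first_timestamp(i));
 *		if (ret)
 *			return ret;
 *	}
 *
 *	// Repeatedly process the queue whose next data is oldest
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		ret = process_queue(queue_nr, &heap); // re-adds its next timestamp
 *		if (ret)
 *			break;
 *	}
 *	auxtrace_heap__free(&heap);
 */
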
/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for the --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};

/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};
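
/*
 * Example (illustrative sketch): an architecture-specific recorder typically
 * embeds struct auxtrace_record in its own state and fills in the callbacks
 * it supports before returning it from its auxtrace_record__init()
 * implementation. 'struct my_pt_recording' and the my_pt_*() callbacks are
 * hypothetical names.
 *
 *	struct my_pt_recording {
 *		struct auxtrace_record itr;
 *		struct perf_pmu *my_pt_pmu;
 *	};
 *
 *	struct my_pt_recording *ptr = zalloc(sizeof(*ptr));
 *
 *	if (!ptr)
 *		return NULL;
 *	ptr->itr.recording_options = my_pt_recording_options;
 *	ptr->itr.info_priv_size = my_pt_info_priv_size;
 *	ptr->itr.info_fill = my_pt_info_fill;
 *	ptr->itr.free = my_pt_recording_free;
 *	ptr->itr.reference = my_pt_reference;
 *	return &ptr->itr;
 */
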
/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm);
int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail);

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm,
					   int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__read_head(mm);
#endif
	head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	smp_rmb();
	return head;
}

static inline int auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail,
					    int kernel_is_64_bit __maybe_unused)
{
	struct perf_event_mmap_page *pc = mm->userpg;

#if BITS_PER_LONG == 32
	if (kernel_is_64_bit)
		return compat_auxtrace_mmap__write_tail(mm, tail);
#endif
	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}
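
/*
 * Example (illustrative sketch): the usual consumer pattern for the AUX ring
 * buffer using the two helpers above. Read the head, copy out the newly
 * produced bytes, then publish the new tail so the kernel can reuse that
 * space. consume() is a hypothetical helper and 'kernel_is_64_bit' would
 * normally come from the recorded environment.
 *
 *	u64 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		// Data between 'old' and 'head' wraps modulo mm->len
 *		consume(mm->base, old, head, mm->len);
 *		mm->prev = head;
 *		auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
 *	}
 */
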
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw);
static inline void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	return auxtrace_buffer__get_data_rw(buffer, fd, false);
}
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
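
/*
 * Example (illustrative sketch): decoders typically embed struct
 * auxtrace_cache_entry at the start of their own cached object so that it
 * can be hashed by key. 'struct my_cache_entry' and the sizes passed to
 * auxtrace_cache__new() below are made up for the example.
 *
 *	struct my_cache_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_cache_entry), 200);
 *	struct my_cache_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	if (e) {
 *		e->insn_cnt = 1;
 *		auxtrace_cache__add(c, key, &e->entry);
 *	}
 *	e = auxtrace_cache__lookup(c, key);
 */
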
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
void auxtrace_regroup_aux_output(struct evlist *evlist);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
				const char *msg, u64 timestamp,
				pid_t machine_pid, int vcpu);
void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);
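
/*
 * Example (illustrative sketch): parsing a bare address filter string into a
 * struct addr_filters list using the helpers declared above. The filter
 * string below is only an example; the accepted syntax is the one used by
 * perf record address filters.
 *
 *	struct addr_filters filts;
 *	struct addr_filter *filt;
 *	int err;
 *
 *	addr_filters__init(&filts);
 *	err = addr_filters__parse_bare_filter(&filts, "filter main @ /bin/ls");
 *	if (!err) {
 *		list_for_each_entry(filt, &filts.head, list)
 *			pr_debug("action %s sym_from %s\n", filt->action, filt->sym_from);
 *	}
 *	addr_filters__exit(&filts);
 */
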
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
"	i[period]:		synthesize instructions events\n" \
"	b:			synthesize branches events (branch misses for Arm SPE)\n" \
"	c:			synthesize branches events (calls only)\n" \
"	r:			synthesize branches events (returns only)\n" \
"	x:			synthesize transactions events\n" \
"	w:			synthesize ptwrite events\n" \
"	p:			synthesize power events\n" \
"	o:			synthesize other events recorded due to the use\n" \
"				of aux-output (refer to perf record)\n" \
"	I:			synthesize interrupt or similar (asynchronous) events\n" \
"				(e.g. Intel PT Event Trace)\n" \
"	e[flags]:		synthesize error events\n" \
"				each flag must be preceded by + or -\n" \
"				error flags are: o (overflow)\n" \
"						 l (data lost)\n" \
"	d[flags]:		create a debug log\n" \
"				each flag must be preceded by + or -\n" \
"				log flags are: a (all perf events)\n" \
"					       o (output to stdout)\n" \
"	f:			synthesize first level cache events\n" \
"	m:			synthesize last level cache events\n" \
"	t:			synthesize TLB events\n" \
"	a:			synthesize remote access events\n" \
"	g[len]:			synthesize a call chain (use with i or x)\n" \
"	G[len]:			synthesize a call chain on existing event records\n" \
"	l[len]:			synthesize last branch entries (use with i or x)\n" \
"	L[len]:			synthesize last branch entries on existing event records\n" \
"	sNUMBER:		skip initial number of events\n" \
"	q:			quicker (less detailed) decoding\n" \
"	A:			approximate IPC\n" \
"	Z:			prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \
"	PERIOD[ns|us|ms|i|t]:	specify period to sample stream\n" \
"	concatenate multiple options. Default is ibxwpe or cewp\n"

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_info(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
s64 perf_event__process_auxtrace(struct perf_session *session __maybe_unused,
				 union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
int perf_event__process_auxtrace_error(struct perf_session *session __maybe_unused,
				       union perf_event *event __maybe_unused)
{
	return 0;
}

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session __maybe_unused,
				      union perf_event *event __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats __maybe_unused)
{
}

static inline
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts __maybe_unused,
			       const char *str __maybe_unused, int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}

static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts __maybe_unused,
				       struct perf_time_interval *ptime_range __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts __maybe_unused)
{
}

#endif

#endif