/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * auxtrace.h: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#ifndef __PERF_AUXTRACE_H
#define __PERF_AUXTRACE_H

#include <sys/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h> // FILE
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <asm/bitsperlong.h>
#include <asm/barrier.h>

union perf_event;
struct perf_session;
struct evlist;
struct evsel;
struct perf_tool;
struct mmap;
struct perf_sample;
struct option;
struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
struct perf_pmu;

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Auxtrace records must have the same alignment as perf event records */
#define PERF_AUXTRACE_RECORD_ALIGNMENT 8

enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};

enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};

/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instructions' events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
};

/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};

#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256

/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};

/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether @evsel is used by AUX area tracing
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
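/*
 * A minimal sketch (not part of this header's API): a decoder typically
 * embeds struct auxtrace in its own state and points session->auxtrace at
 * it while handling the auxtrace_info event. The names "my_decoder" and
 * the my_decoder_* callbacks below are hypothetical.
 *
 *	struct my_decoder {
 *		struct auxtrace auxtrace;
 *		...			// decoder-private state
 *	};
 *
 *	md->auxtrace.process_event = my_decoder_process_event;
 *	md->auxtrace.process_auxtrace_event = my_decoder_process_auxtrace_event;
 *	md->auxtrace.flush_events = my_decoder_flush;
 *	md->auxtrace.free = my_decoder_free;
 *	session->auxtrace = &md->auxtrace;
 */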
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	int cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};

/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};

/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};

/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};

/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};
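/*
 * Illustrative sketch only: decoders typically use the heap to merge decoded
 * data from many queues in timestamp order, re-adding a queue with its next
 * timestamp after each step. The helpers first_timestamp_of() and
 * process_queue() are hypothetical.
 *
 *	for (i = 0; i < queues->nr_queues; i++)
 *		auxtrace_heap__add(&heap, i, first_timestamp_of(i));
 *
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *
 *		auxtrace_heap__pop(&heap);
 *		ts = process_queue(queue_nr);	// decode up to the next timestamp
 *		if (ts)
 *			auxtrace_heap__add(&heap, queue_nr, ts);
 *	}
 */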
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	int cpu;
};

/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for the --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};
/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};

/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};

struct auxtrace_cache;

#ifdef HAVE_AUXTRACE_SUPPORT

/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic. However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}

static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
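/*
 * Illustrative sketch only: a typical consumer pairs the accessors above,
 * reading aux_head before copying data out and publishing aux_tail only
 * after the copy is complete, so the kernel may reuse the space. The helper
 * copy_out() is hypothetical.
 *
 *	u64 head = auxtrace_mmap__read_head(mm);
 *	u64 old = mm->prev;
 *
 *	if (head != old) {
 *		copy_out(mm->base, mm->mask, old, head);
 *		auxtrace_mmap__write_tail(mm, head);
 *		mm->prev = head;
 *	}
 */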
int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

typedef int (*process_auxtrace_t)(struct perf_tool *tool,
				  struct mmap *map,
				  union perf_event *event, void *data1,
				  size_t len1, void *data2, size_t len2);

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn);

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size);

int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr);
struct auxtrace_queue *
auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
			      struct perf_sample *sample,
			      struct perf_session *session);
int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference);
void auxtrace_queues__free(struct auxtrace_queues *queues);
int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session);
int auxtrace_queue_data(struct perf_session *session, bool samples,
			bool events);
struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer);
void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
void auxtrace_buffer__free(struct auxtrace_buffer *buffer);

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal);
void auxtrace_heap__pop(struct auxtrace_heap *heap);
void auxtrace_heap__free(struct auxtrace_heap *heap);

struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent);
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry);
void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
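/*
 * Illustrative sketch only: a caller-defined entry embeds
 * struct auxtrace_cache_entry as its first member so it can be keyed by a
 * u32 and recovered from the void pointer returned by lookup. The type
 * "struct my_entry" and its insn_cnt member are hypothetical.
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry entry;
 *		u64 insn_cnt;
 *	};
 *
 *	c = auxtrace_cache__new(bits, sizeof(struct my_entry), limit_percent);
 *	e = auxtrace_cache__alloc_entry(c);
 *	e->insn_cnt = insn_cnt;
 *	auxtrace_cache__add(c, key, &e->entry);
 *	...
 *	e = auxtrace_cache__lookup(c, key);
 */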
struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
					      int *err);

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts,
				    const char *str);
int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str);
int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts);
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist);
int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size);
void auxtrace_record__free(struct auxtrace_record *itr);
int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);

int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
				   off_t file_offset);
int auxtrace_index__write(int fd, struct list_head *head);
int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap);
void auxtrace_index__free(struct list_head *head);

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp);

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event);
s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event);
int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event);
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset);
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample);

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event);
void events_stats__auxtrace_error_warn(const struct events_stats *stats);

void addr_filters__init(struct addr_filters *filts);
void addr_filters__exit(struct addr_filters *filts);
int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter);
int auxtrace_parse_filters(struct evlist *evlist);

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool);
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample);
int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
void auxtrace__free_events(struct perf_session *session);
void auxtrace__free(struct perf_session *session);
bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel);

#define ITRACE_HELP \
"	i: synthesize instructions events\n" \
"	b: synthesize branches events\n" \
"	c: synthesize branches events (calls only)\n" \
"	r: synthesize branches events (returns only)\n" \
"	x: synthesize transactions events\n" \
"	w: synthesize ptwrite events\n" \
"	p: synthesize power events\n" \
"	e: synthesize error events\n" \
"	d: create a debug log\n" \
"	g[len]: synthesize a call chain (use with i or x)\n" \
"	l[len]: synthesize last branch entries (use with i or x)\n" \
"	sNUMBER: skip initial number of events\n" \
"	PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
"	concatenate multiple options. Default is ibxwpe or cewp\n"
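/*
 * Illustrative examples of the option string described above (values are
 * made up): "--itrace=i100us" synthesizes 'instructions' events with a
 * period of 100 microseconds, and "--itrace=i100usg32" additionally
 * synthesizes call chains of up to 32 entries.
 */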
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
				       struct perf_time_interval *ptime_range,
				       int range_num)
{
	opts->ptime_range = ptime_range;
	opts->range_num = range_num;
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
{
	opts->ptime_range = NULL;
	opts->range_num = 0;
}

#else
#include "debug.h"

static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}

static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}

static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}

#define perf_event__process_auxtrace_info 0
#define perf_event__process_auxtrace 0
#define perf_event__process_auxtrace_error 0

static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}

static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}

static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}

static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}

static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}

static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}

static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}

static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}

static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}
static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}

static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}

static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd);
void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu);

#define ITRACE_HELP ""

static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}

static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}

#endif

#endif