/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>

#include "../perf.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct mmap2_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	u32 maj;
	u32 min;
	u64 ino;
	u64 ino_generation;
	u32 prot;
	u32 flags;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct namespaces_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 nr_namespaces;
	struct perf_ns_link_info link_info[];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
	u32 tid, ptid;
	u64 time;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

struct lost_samples_event {
	struct perf_event_header header;
	u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 time_enabled;
	u64 time_running;
	u64 id;
};

struct throttle_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 stream_id;
};

#ifndef KSYM_NAME_LEN
#define KSYM_NAME_LEN 256
#endif

struct ksymbol_event {
	struct perf_event_header header;
	u64 addr;
	u32 len;
	u16 ksym_type;
	u16 flags;
	char name[KSYM_NAME_LEN];
};

struct bpf_event {
	struct perf_event_header header;
	u16 type;
	u16 flags;
	u32 id;

	/* for bpf_prog types */
	u8 tag[BPF_TAG_SIZE];  // prog tag
};

#define PERF_SAMPLE_MASK				\
	(PERF_SAMPLE_IP | PERF_SAMPLE_TID |		\
	 PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |		\
	 PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |	\
	 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |		\
	 PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
	struct perf_event_header header;
	u64 array[];
};
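/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the u64 array[] of a sample_event holds the sample fields
 * back to back, in the bit order of the event's attr.sample_type.
 * Assuming sample_type is exactly PERF_SAMPLE_IP | PERF_SAMPLE_TID,
 * the first two slots would decode roughly like this; the real, full
 * decoder lives in the evsel code.
 */
static inline void example__parse_ip_tid(const struct sample_event *event,
					 u64 *ip, u32 *pid, u32 *tid)
{
	const u64 *array = event->array;
	const u32 *p;

	*ip = *array++;			/* PERF_SAMPLE_IP */
	p = (const u32 *)array;		/* PERF_SAMPLE_TID: u32 pid, tid */
	*pid = p[0];
	*tid = p[1];
}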
struct regs_dump {
	u64 abi;
	u64 mask;
	u64 *regs;

	/* Cached values/mask filled by first register access. */
	u64 cache_regs[PERF_REGS_MAX];
	u64 cache_mask;
};

struct stack_dump {
	u16 offset;
	u64 size;
	char *data;
};

struct sample_read_value {
	u64 value;
	u64 id;
};

struct sample_read {
	u64 time_enabled;
	u64 time_running;
	union {
		struct {
			u64 nr;
			struct sample_read_value *values;
		} group;
		struct sample_read_value one;
	};
};
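/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): which member of the union above is valid depends on the
 * event's read_format.  PERF_FORMAT_GROUP selects the group layout,
 * otherwise only 'one' is meaningful.
 */
static inline u64 example__sample_read_total(const struct sample_read *read,
					     u64 read_format)
{
	u64 total = 0;

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		for (i = 0; i < read->group.nr; i++)
			total += read->group.values[i].value;
	} else {
		total = read->one.value;
	}

	return total;
}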
struct ip_callchain {
	u64 nr;
	u64 ips[];
};

struct branch_stack;

enum {
	PERF_IP_FLAG_BRANCH		= 1ULL << 0,
	PERF_IP_FLAG_CALL		= 1ULL << 1,
	PERF_IP_FLAG_RETURN		= 1ULL << 2,
	PERF_IP_FLAG_CONDITIONAL	= 1ULL << 3,
	PERF_IP_FLAG_SYSCALLRET		= 1ULL << 4,
	PERF_IP_FLAG_ASYNC		= 1ULL << 5,
	PERF_IP_FLAG_INTERRUPT		= 1ULL << 6,
	PERF_IP_FLAG_TX_ABORT		= 1ULL << 7,
	PERF_IP_FLAG_TRACE_BEGIN	= 1ULL << 8,
	PERF_IP_FLAG_TRACE_END		= 1ULL << 9,
	PERF_IP_FLAG_IN_TX		= 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK		(\
	PERF_IP_FLAG_BRANCH		|\
	PERF_IP_FLAG_CALL		|\
	PERF_IP_FLAG_RETURN		|\
	PERF_IP_FLAG_CONDITIONAL	|\
	PERF_IP_FLAG_SYSCALLRET		|\
	PERF_IP_FLAG_ASYNC		|\
	PERF_IP_FLAG_INTERRUPT		|\
	PERF_IP_FLAG_TX_ABORT		|\
	PERF_IP_FLAG_TRACE_BEGIN	|\
	PERF_IP_FLAG_TRACE_END)

#define MAX_INSN 16

struct perf_sample {
	u64 ip;
	u32 pid, tid;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	u64 period;
	u64 weight;
	u64 transaction;
	u32 cpu;
	u32 raw_size;
	u64 data_src;
	u64 phys_addr;
	u32 flags;
	u16 insn_len;
	u8 cpumode;
	u16 misc;
	char insn[MAX_INSN];
	void *raw_data;
	struct ip_callchain *callchain;
	struct branch_stack *branch_stack;
	struct regs_dump user_regs;
	struct regs_dump intr_regs;
	struct stack_dump user_stack;
	struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
	(PERF_MEM_S(OP, NA)   |\
	 PERF_MEM_S(LVL, NA)  |\
	 PERF_MEM_S(SNOOP, NA)|\
	 PERF_MEM_S(LOCK, NA) |\
	 PERF_MEM_S(TLB, NA))

struct build_id_event {
	struct perf_event_header header;
	pid_t pid;
	u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
	char filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
	PERF_RECORD_USER_TYPE_START	= 64,
	PERF_RECORD_HEADER_ATTR		= 64,
	PERF_RECORD_HEADER_EVENT_TYPE	= 65, /* deprecated */
	PERF_RECORD_HEADER_TRACING_DATA	= 66,
	PERF_RECORD_HEADER_BUILD_ID	= 67,
	PERF_RECORD_FINISHED_ROUND	= 68,
	PERF_RECORD_ID_INDEX		= 69,
	PERF_RECORD_AUXTRACE_INFO	= 70,
	PERF_RECORD_AUXTRACE		= 71,
	PERF_RECORD_AUXTRACE_ERROR	= 72,
	PERF_RECORD_THREAD_MAP		= 73,
	PERF_RECORD_CPU_MAP		= 74,
	PERF_RECORD_STAT_CONFIG		= 75,
	PERF_RECORD_STAT		= 76,
	PERF_RECORD_STAT_ROUND		= 77,
	PERF_RECORD_EVENT_UPDATE	= 78,
	PERF_RECORD_TIME_CONV		= 79,
	PERF_RECORD_HEADER_FEATURE	= 80,
	PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE = 1,
	PERF_AUXTRACE_ERROR_MAX
};

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
	PERF_SYNTH_INTEL_PTWRITE,
	PERF_SYNTH_INTEL_MWAIT,
	PERF_SYNTH_INTEL_PWRE,
	PERF_SYNTH_INTEL_EXSTOP,
	PERF_SYNTH_INTEL_PWRX,
	PERF_SYNTH_INTEL_CBR,
};

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
 * 8-byte aligned. That means raw_data must be dereferenced with an offset of 4.
 * Refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
 * the structure sizes are 4 bytes bigger than the raw_size, refer to
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
	u64	payload;
};

struct perf_synth_intel_mwait {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	hints		:  8,
				reserved1	: 24,
				extensions	:  2,
				reserved2	: 30;
		};
		u64	payload;
	};
};

struct perf_synth_intel_pwre {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	reserved1	:  7,
				hw		:  1,
				subcstate	:  4,
				cstate		:  4,
				reserved2	: 48;
		};
		u64	payload;
	};
};

struct perf_synth_intel_exstop {
	u32 padding;
	union {
		struct {
			u32	ip		:  1,
				reserved	: 31;
		};
		u32	flags;
	};
};

struct perf_synth_intel_pwrx {
	u32 padding;
	u32 reserved;
	union {
		struct {
			u64	deepest_cstate	:  4,
				last_cstate	:  4,
				wake_reason	:  4,
				reserved1	: 52;
		};
		u64	payload;
	};
};

struct perf_synth_intel_cbr {
	u32 padding;
	union {
		struct {
			u32	cbr		:  8,
				reserved1	:  8,
				max_nonturbo	:  8,
				reserved2	:  8;
		};
		u32	flags;
	};
	u32 freq;
	u32 reserved3;
};

/*
 * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
 * 8-byte alignment.
 */
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
	return sample->raw_data - 4;
}

static inline void *perf_synth__raw_data(void *p)
{
	return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
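/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): pulling a synthesized PERF_SYNTH_INTEL_PTWRITE record out of
 * a sample.  perf_sample__synth_ptr() backs raw_data up by 4 bytes so
 * the struct, including its padding member, is 8-byte aligned.
 */
static inline bool example__get_ptwrite(struct perf_sample *sample,
					struct perf_synth_intel_ptwrite *ptw)
{
	struct perf_synth_intel_ptwrite *raw;

	if (perf_sample__bad_synth_size(sample, *ptw))
		return false;

	raw = perf_sample__synth_ptr(sample);
	*ptw = *raw;
	return true;
}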
/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency doesn't give the
 * total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
	u64 total_period;
	u64 total_non_filtered_period;
	u64 total_lost;
	u64 total_lost_samples;
	u64 total_aux_lost;
	u64 total_aux_partial;
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];
	u32 nr_non_filtered_samples;
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
	u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
	u32 nr_proc_map_timeout;
};
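/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): accounting a PERF_RECORD_LOST event per the scheme described
 * above -- bump the per-type counter and accumulate the exact number of
 * lost events.
 */
static inline void example__account_lost(struct events_stats *stats,
					 const struct lost_event *lost)
{
	stats->nr_events[PERF_RECORD_LOST]++;
	stats->total_lost += lost->lost;
}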
enum {
	PERF_CPU_MAP__CPUS = 0,
	PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
	u16 nr;
	u16 cpu[];
};

struct cpu_map_mask {
	u16 nr;
	u16 long_size;
	unsigned long mask[];
};

struct cpu_map_data {
	u16 type;
	char data[];
};

struct cpu_map_event {
	struct perf_event_header header;
	struct cpu_map_data data;
};

struct attr_event {
	struct perf_event_header header;
	struct perf_event_attr attr;
	u64 id[];
};

enum {
	PERF_EVENT_UPDATE__UNIT  = 0,
	PERF_EVENT_UPDATE__SCALE = 1,
	PERF_EVENT_UPDATE__NAME  = 2,
	PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
	struct cpu_map_data cpus;
};

struct event_update_event_scale {
	double scale;
};

struct event_update_event {
	struct perf_event_header header;
	u64 type;
	u64 id;

	char data[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
	u64 event_id;
	char name[MAX_EVENT_NAME];
};

struct event_type_event {
	struct perf_event_header header;
	struct perf_trace_event_type event_type;
};

struct tracing_data_event {
	struct perf_event_header header;
	u32 size;
};

struct id_index_entry {
	u64 id;
	u64 idx;
	u64 cpu;
	u64 tid;
};

struct id_index_event {
	struct perf_event_header header;
	u64 nr;
	struct id_index_entry entries[];
};

struct auxtrace_info_event {
	struct perf_event_header header;
	u32 type;
	u32 reserved__; /* For alignment */
	u64 priv[];
};

struct auxtrace_event {
	struct perf_event_header header;
	u64 size;
	u64 offset;
	u64 reference;
	u32 idx;
	u32 tid;
	u32 cpu;
	u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
	struct perf_event_header header;
	u32 type;
	u32 code;
	u32 cpu;
	u32 pid;
	u32 tid;
	u32 fmt;
	u64 ip;
	u64 time;
	char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
	struct perf_event_header header;
	u64 aux_offset;
	u64 aux_size;
	u64 flags;
};

struct itrace_start_event {
	struct perf_event_header header;
	u32 pid, tid;
};

struct context_switch_event {
	struct perf_event_header header;
	u32 next_prev_pid;
	u32 next_prev_tid;
};

struct thread_map_event_entry {
	u64 pid;
	char comm[16];
};

struct thread_map_event {
	struct perf_event_header header;
	u64 nr;
	struct thread_map_event_entry entries[];
};

enum {
	PERF_STAT_CONFIG_TERM__AGGR_MODE	= 0,
	PERF_STAT_CONFIG_TERM__INTERVAL		= 1,
	PERF_STAT_CONFIG_TERM__SCALE		= 2,
	PERF_STAT_CONFIG_TERM__MAX		= 3,
};

struct stat_config_event_entry {
	u64 tag;
	u64 val;
};

struct stat_config_event {
	struct perf_event_header header;
	u64 nr;
	struct stat_config_event_entry data[];
};

struct stat_event {
	struct perf_event_header header;

	u64 id;
	u32 cpu;
	u32 thread;

	union {
		struct {
			u64 val;
			u64 ena;
			u64 run;
		};
		u64 values[3];
	};
};

enum {
	PERF_STAT_ROUND_TYPE__INTERVAL	= 0,
	PERF_STAT_ROUND_TYPE__FINAL	= 1,
};

struct stat_round_event {
	struct perf_event_header header;
	u64 type;
	u64 time;
};

struct time_conv_event {
	struct perf_event_header header;
	u64 time_shift;
	u64 time_mult;
	u64 time_zero;
};
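/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the PERF_RECORD_TIME_CONV parameters follow the usual
 * mult/shift scheme (cf. the fields of the same name in
 * perf_event_mmap_page), so converting a raw TSC value to perf time
 * would look roughly like this:
 */
static inline u64 example__tsc_to_perf_time(u64 cyc,
					    const struct time_conv_event *tc)
{
	u64 quot = cyc >> tc->time_shift;
	u64 rem  = cyc & (((u64)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}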
struct feature_event {
	struct perf_event_header header;
	u64 feat_id;
	char data[];
};

union perf_event {
	struct perf_event_header	header;
	struct mmap_event		mmap;
	struct mmap2_event		mmap2;
	struct comm_event		comm;
	struct namespaces_event		namespaces;
	struct fork_event		fork;
	struct lost_event		lost;
	struct lost_samples_event	lost_samples;
	struct read_event		read;
	struct throttle_event		throttle;
	struct sample_event		sample;
	struct attr_event		attr;
	struct event_update_event	event_update;
	struct event_type_event		event_type;
	struct tracing_data_event	tracing_data;
	struct build_id_event		build_id;
	struct id_index_event		id_index;
	struct auxtrace_info_event	auxtrace_info;
	struct auxtrace_event		auxtrace;
	struct auxtrace_error_event	auxtrace_error;
	struct aux_event		aux;
	struct itrace_start_event	itrace_start;
	struct context_switch_event	context_switch;
	struct thread_map_event		thread_map;
	struct cpu_map_event		cpu_map;
	struct stat_config_event	stat_config;
	struct stat_event		stat;
	struct stat_round_event		stat_round;
	struct time_conv_event		time_conv;
	struct feature_event		feat;
	struct ksymbol_event		ksymbol_event;
	struct bpf_event		bpf_event;
};

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
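/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * header): a minimal perf_event__handler_t implementation that the
 * synthesize/process routines below could be handed.  It only reports
 * PERF_RECORD_LOST events and accepts everything else.
 */
static inline int example__process_event(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	(void)tool; (void)sample; (void)machine;	/* unused here */

	if (event->header.type == PERF_RECORD_LOST)
		fprintf(stderr, "lost %llu events\n",
			(unsigned long long)event->lost.lost);
	return 0;
}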
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine, bool mmap_data);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *cpus,
				   perf_event__handler_t process,
				   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data,
				   unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 time, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine);
int perf_event__process_namespaces(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine);
int perf_event__process_ksymbol(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine);
int perf_event__process_bpf_event(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine);

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data);

int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
extern unsigned int proc_map_timeout;

#endif /* __PERF_RECORD_H */