/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc.  Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <api/fs/tracing_path.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

#define RAW_SYSCALL_ARGS_NUM	6
/*
 * strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void *parm;
	const char *name;
	u16 nr_entries; // for arrays
	bool show_zero;
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	} bpf_prog_name;
	struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
	u8 nr_args;
	bool errpid;
	bool timeout;
	bool hexret;
};

struct trace {
	struct perf_tool tool;
	struct syscalltbl *sctbl;
	struct {
		struct syscall *table;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map *sys_enter,
				       *sys_exit;
		} prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		} events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct {
		struct bpf_map *map;
	} dump;
	struct record_opts opts;
	struct evlist *evlist;
	struct machine *host;
	struct thread *current;
	struct bpf_object *bpf_obj;
	struct cgroup *cgroup;
	u64 base_time;
	FILE *output;
	unsigned long nr_events;
	unsigned long nr_events_printed;
	unsigned long max_events;
	struct evswitch evswitch;
	struct strlist *ev_qualifier;
	struct {
		size_t nr;
		int *entries;
	} ev_qualifier_ids;
	struct {
		size_t nr;
		pid_t *entries;
		struct bpf_map *map;
	} filter_pids;
	double duration_filter;
	double runtime_ms;
	struct {
		u64 vfs_getname,
		    proc_getname;
	} stats;
	unsigned int max_stack;
	unsigned int min_stack;
	int raw_augmented_syscalls_args_size;
	bool raw_augmented_syscalls;
	bool fd_path_disabled;
	bool sort_events;
	bool not_ev_qualifier;
	bool live;
	bool full_time;
	bool sched;
	bool multiple_threads;
	bool summary;
	bool summary_only;
	bool errno_summary;
	bool failure_only;
	bool show_comm;
	bool print_sample;
	bool show_tool_stats;
	bool trace_syscalls;
	bool libtraceevent_print;
	bool kernel_syscallchains;
	s16 args_alignment;
	bool show_tstamp;
	bool show_duration;
	bool show_zeros;
	bool show_arg_names;
	bool show_string_prefix;
	bool force;
	bool vfs_getname;
	int trace_pgfaults;
	char *perfconfig_events;
	struct {
		struct ordered_events data;
		u64 last;
	} oe;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
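/*
 * For illustration (not compiled anywhere), TP_UINT_FIELD(32) above expands
 * to roughly:
 *
 *	static u64 tp_field__u32(struct tp_field *field, struct perf_sample *sample)
 *	{
 *		u32 value;
 *		memcpy(&value, sample->raw_data + field->offset, sizeof(value));
 *		return value;
 *	}
 *
 * The memcpy() is deliberate: raw_data may not be suitably aligned for direct
 * loads, see the comment above syscall_arg__val() further down in this file.
 */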
static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}
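/*
 * Example of the dispatch above: for a 4-byte tracepoint field at offset 16
 * in samples coming from a machine of the opposite endianness,
 * __tp_field__init_uint(field, 4, 16, true) picks tp_field__swapped_u32, so
 * every later field->integer(field, sample) call byte-swaps transparently.
 */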
struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

/*
 * The evsel->priv as used by 'perf trace'
 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 * fmt: for all the other tracepoints
 */
struct evsel_trace {
	struct syscall_tp sc;
	struct syscall_arg_fmt *fmt;
};

static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}

static void evsel_trace__delete(struct evsel_trace *et)
{
	if (et == NULL)
		return;

	zfree(&et->fmt);
	free(et);
}

/*
 * Used with raw_syscalls:sys_{enter,exit} and with the
 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 */
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return &et->sc;
}

static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
	if (evsel->priv == NULL) {
		evsel->priv = evsel_trace__new();
		if (evsel->priv == NULL)
			return NULL;
	}

	return __evsel__syscall_tp(evsel);
}

/*
 * Used with all the other tracepoints.
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");

		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}
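/*
 * Layout the evsel__init_augmented_syscall_tp_{args,ret}() helpers above rely
 * on: the augmented payload carries the syscall id in a u64-sized slot (at
 * sc->id.offset) immediately followed by either the raw syscall args
 * (sys_enter) or the u64 return value (sys_exit), hence the
 * "sc->id.offset + sizeof(u64)" in both.
 */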
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);

		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);

		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}
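/*
 * For illustration, with the "itimers" strarray defined further down (offset
 * 0, prefix "ITIMER_"), strarray__scnprintf() turns val 0 into "REAL", or
 * "ITIMER_REAL" when string prefixes were requested; an out-of-range value
 * such as 7 falls back to intfmt plus an "ITIMER_???" marker so the raw
 * number is never lost.
 */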
sa->prefix : "", sa->entries[idx]); 574 } 575 } 576 577 printed = scnprintf(bf, size, intfmt, val); 578 if (show_prefix) 579 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix); 580 return printed; 581 } 582 583 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret) 584 { 585 int i; 586 587 for (i = 0; i < sa->nr_entries; ++i) { 588 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { 589 *ret = sa->offset + i; 590 return true; 591 } 592 } 593 594 return false; 595 } 596 597 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret) 598 { 599 u64 val = 0; 600 char *tok = bf, *sep, *end; 601 602 *ret = 0; 603 604 while (size != 0) { 605 int toklen = size; 606 607 sep = memchr(tok, '|', size); 608 if (sep != NULL) { 609 size -= sep - tok + 1; 610 611 end = sep - 1; 612 while (end > tok && isspace(*end)) 613 --end; 614 615 toklen = end - tok + 1; 616 } 617 618 while (isspace(*tok)) 619 ++tok; 620 621 if (isalpha(*tok) || *tok == '_') { 622 if (!strarray__strtoul(sa, tok, toklen, &val)) 623 return false; 624 } else 625 val = strtoul(tok, NULL, 0); 626 627 *ret |= (1 << (val - 1)); 628 629 if (sep == NULL) 630 break; 631 tok = sep + 1; 632 } 633 634 return true; 635 } 636 637 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret) 638 { 639 int i; 640 641 for (i = 0; i < sas->nr_entries; ++i) { 642 struct strarray *sa = sas->entries[i]; 643 644 if (strarray__strtoul(sa, bf, size, ret)) 645 return true; 646 } 647 648 return false; 649 } 650 651 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, 652 struct syscall_arg *arg) 653 { 654 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); 655 } 656 657 #ifndef AT_FDCWD 658 #define AT_FDCWD -100 659 #endif 660 661 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, 662 struct syscall_arg *arg) 663 { 664 int fd = arg->val; 665 const char *prefix = "AT_FD"; 666 667 if (fd == AT_FDCWD) 668 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); 669 670 return syscall_arg__scnprintf_fd(bf, size, arg); 671 } 672 673 #define SCA_FDAT syscall_arg__scnprintf_fd_at 674 675 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 676 struct syscall_arg *arg); 677 678 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd 679 680 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg) 681 { 682 return scnprintf(bf, size, "%#lx", arg->val); 683 } 684 685 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg) 686 { 687 if (arg->val == 0) 688 return scnprintf(bf, size, "NULL"); 689 return syscall_arg__scnprintf_hex(bf, size, arg); 690 } 691 692 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg) 693 { 694 return scnprintf(bf, size, "%d", arg->val); 695 } 696 697 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg) 698 { 699 return scnprintf(bf, size, "%ld", arg->val); 700 } 701 702 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) 703 { 704 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can 705 // fill missing comms using thread__set_comm()... 706 // here or in a special syscall_arg__scnprintf_pid_sched_tp... 
static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	// fill missing comms using thread__set_comm()...
	// here or in a special syscall_arg__scnprintf_pid_sched_tp...
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
	"PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
	"PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
	"PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
	"TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
	"BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
	"MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
	"LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
	"LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");
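/*
 * Note on the _OFFSET variants used above: the third argument biases the
 * array index, e.g. DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_",
 * F_LINUX_SPECIFIC_BASE) makes entry [0] ("SETLEASE") stand for value 1024,
 * so an fcntl cmd of 1024 beautifies to F_SETLEASE.
 */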
"BOOTTIME_ALARM", "SGI_CYCLE", "TAI" 797 }; 798 static DEFINE_STRARRAY(clockid, "CLOCK_"); 799 800 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, 801 struct syscall_arg *arg) 802 { 803 bool show_prefix = arg->show_string_prefix; 804 const char *suffix = "_OK"; 805 size_t printed = 0; 806 int mode = arg->val; 807 808 if (mode == F_OK) /* 0 */ 809 return scnprintf(bf, size, "F%s", show_prefix ? suffix : ""); 810 #define P_MODE(n) \ 811 if (mode & n##_OK) { \ 812 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \ 813 mode &= ~n##_OK; \ 814 } 815 816 P_MODE(R); 817 P_MODE(W); 818 P_MODE(X); 819 #undef P_MODE 820 821 if (mode) 822 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); 823 824 return printed; 825 } 826 827 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode 828 829 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 830 struct syscall_arg *arg); 831 832 #define SCA_FILENAME syscall_arg__scnprintf_filename 833 834 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 835 struct syscall_arg *arg) 836 { 837 bool show_prefix = arg->show_string_prefix; 838 const char *prefix = "O_"; 839 int printed = 0, flags = arg->val; 840 841 #define P_FLAG(n) \ 842 if (flags & O_##n) { \ 843 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 844 flags &= ~O_##n; \ 845 } 846 847 P_FLAG(CLOEXEC); 848 P_FLAG(NONBLOCK); 849 #undef P_FLAG 850 851 if (flags) 852 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 853 854 return printed; 855 } 856 857 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 858 859 #ifndef GRND_NONBLOCK 860 #define GRND_NONBLOCK 0x0001 861 #endif 862 #ifndef GRND_RANDOM 863 #define GRND_RANDOM 0x0002 864 #endif 865 866 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, 867 struct syscall_arg *arg) 868 { 869 bool show_prefix = arg->show_string_prefix; 870 const char *prefix = "GRND_"; 871 int printed = 0, flags = arg->val; 872 873 #define P_FLAG(n) \ 874 if (flags & GRND_##n) { \ 875 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 876 flags &= ~GRND_##n; \ 877 } 878 879 P_FLAG(RANDOM); 880 P_FLAG(NONBLOCK); 881 #undef P_FLAG 882 883 if (flags) 884 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 885 886 return printed; 887 } 888 889 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 890 891 #define STRARRAY(name, array) \ 892 { .scnprintf = SCA_STRARRAY, \ 893 .strtoul = STUL_STRARRAY, \ 894 .parm = &strarray__##array, } 895 896 #define STRARRAY_FLAGS(name, array) \ 897 { .scnprintf = SCA_STRARRAY_FLAGS, \ 898 .strtoul = STUL_STRARRAY_FLAGS, \ 899 .parm = &strarray__##array, } 900 901 #include "trace/beauty/arch_errno_names.c" 902 #include "trace/beauty/eventfd.c" 903 #include "trace/beauty/futex_op.c" 904 #include "trace/beauty/futex_val3.c" 905 #include "trace/beauty/mmap.c" 906 #include "trace/beauty/mode_t.c" 907 #include "trace/beauty/msg_flags.c" 908 #include "trace/beauty/open_flags.c" 909 #include "trace/beauty/perf_event_open.c" 910 #include "trace/beauty/pid.c" 911 #include "trace/beauty/sched_policy.c" 912 #include "trace/beauty/seccomp.c" 913 #include "trace/beauty/signum.c" 914 #include "trace/beauty/socket_type.c" 915 #include "trace/beauty/waitid_options.c" 916 917 static const struct syscall_fmt syscall_fmts[] = { 918 { .name = "access", 919 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 920 { .name = "arch_prctl", 921 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ }, 922 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, }, 923 { .name = "bind", 924 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 925 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, 926 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 927 { .name = "bpf", 928 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, }, 929 { .name = "brk", .hexret = true, 930 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, }, 931 { .name = "clock_gettime", 932 .arg = { [0] = STRARRAY(clk_id, clockid), }, }, 933 { .name = "clock_nanosleep", 934 .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, }, 935 { .name = "clone", .errpid = true, .nr_args = 5, 936 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, }, 937 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, }, 938 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, }, 939 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, }, 940 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, }, 941 { .name = "close", 942 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, }, 943 { .name = "connect", 944 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 945 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, 946 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 947 { .name = "epoll_ctl", 948 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, 949 { .name = "eventfd2", 950 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, 951 { .name = "fchmodat", 952 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 953 { .name = "fchownat", 954 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 955 { .name = "fcntl", 956 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */ 957 .strtoul = STUL_STRARRAYS, 958 .parm = &strarrays__fcntl_cmds_arrays, 959 .show_zero = true, }, 960 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, }, 961 { .name = "flock", 962 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, }, 963 { .name = "fsconfig", 964 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, }, 965 { .name = "fsmount", 966 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags), 967 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, }, 968 { .name = "fspick", 969 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 970 [1] = { .scnprintf = 
static const struct syscall_fmt syscall_fmts[] = {
	{ .name = "access",
	  .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
	{ .name = "arch_prctl",
	  .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
		   [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
	{ .name = "bind",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "bpf",
	  .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
	{ .name = "brk", .hexret = true,
	  .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
	{ .name = "clock_gettime",
	  .arg = { [0] = STRARRAY(clk_id, clockid), }, },
	{ .name = "clock_nanosleep",
	  .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
	{ .name = "clone", .errpid = true, .nr_args = 5,
	  .arg = { [0] = { .name = "flags",	    .scnprintf = SCA_CLONE_FLAGS, },
		   [1] = { .name = "child_stack",   .scnprintf = SCA_HEX, },
		   [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, },
		   [3] = { .name = "child_tidptr",  .scnprintf = SCA_HEX, },
		   [4] = { .name = "tls",	    .scnprintf = SCA_HEX, }, }, },
	{ .name = "close",
	  .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, },
	{ .name = "connect",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ },
		   [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ },
		   [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, },
	{ .name = "epoll_ctl",
	  .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, },
	{ .name = "eventfd2",
	  .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, },
	{ .name = "fchmodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fchownat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "fcntl",
	  .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */
			   .strtoul = STUL_STRARRAYS,
			   .parm = &strarrays__fcntl_cmds_arrays,
			   .show_zero = true, },
		   [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, },
	{ .name = "flock",
	  .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, },
	{ .name = "fsconfig",
	  .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, },
	{ .name = "fsmount",
	  .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags),
		   [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, },
	{ .name = "fspick",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* path */ },
		   [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, },
	{ .name = "fstat", .alias = "newfstat", },
	{ .name = "fstatat", .alias = "newfstatat", },
	{ .name = "futex",
	  .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
		   [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
	{ .name = "futimesat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "getitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name = "getpid", .errpid = true, },
	{ .name = "getpgid", .errpid = true, },
	{ .name = "getppid", .errpid = true, },
	{ .name = "getrandom",
	  .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
	{ .name = "getrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "getsockopt",
	  .arg = { [1] = STRARRAY(level, socket_level), }, },
	{ .name = "gettid", .errpid = true, },
	{ .name = "ioctl",
	  .arg = {
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * FIXME: Make this available to all arches.
	 */
		   [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ },
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#else
		   [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, },
#endif
	{ .name = "kcmp", .nr_args = 5,
	  .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, },
		   [1] = { .name = "pid2", .scnprintf = SCA_PID, },
		   [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, },
		   [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, },
		   [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, },
	{ .name = "keyctl",
	  .arg = { [0] = STRARRAY(option, keyctl_options), }, },
	{ .name = "kill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "linkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "lseek",
	  .arg = { [2] = STRARRAY(whence, whences), }, },
	{ .name = "lstat", .alias = "newlstat", },
	{ .name = "madvise",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, },
	{ .name = "mkdirat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mknodat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
	{ .name = "mmap", .hexret = true,
	/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
	  .alias = "old_mmap",
#endif
	  .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */
			   .strtoul = STUL_STRARRAY_FLAGS,
			   .parm = &strarray__mmap_flags, },
		   [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
	{ .name = "mount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
		   [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
			   .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "move_mount",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ },
		   [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ },
		   [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ },
		   [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ },
		   [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, },
	{ .name = "mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, },
	{ .name = "mq_unlink",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
	{ .name = "mremap", .hexret = true,
	  .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
	{ .name = "name_to_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "newfstatat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "open",
	  .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "open_by_handle_at",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "openat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
		   [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
	{ .name = "perf_event_open",
	  .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
		   [2] = { .scnprintf = SCA_INT, /* cpu */ },
		   [3] = { .scnprintf = SCA_FD, /* group_fd */ },
		   [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
	{ .name = "pipe2",
	  .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, },
	{ .name = "pkey_alloc",
	  .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, },
	{ .name = "pkey_free",
	  .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, },
	{ .name = "pkey_mprotect",
	  .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ },
		   [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
		   [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
	{ .name = "poll", .timeout = true, },
	{ .name = "ppoll", .timeout = true, },
	{ .name = "prctl",
	  .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
			   .strtoul = STUL_STRARRAY,
			   .parm = &strarray__prctl_options, },
		   [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
		   [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
	{ .name = "pread", .alias = "pread64", },
	{ .name = "preadv", .alias = "pread", },
	{ .name = "prlimit64",
	  .arg = { [1] = STRARRAY(resource, rlimit_resources), }, },
	{ .name = "pwrite", .alias = "pwrite64", },
	{ .name = "readlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name = "recvfrom",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmmsg",
	  .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "recvmsg",
	  .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
	{ .name = "renameat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
	{ .name = "renameat2",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
		   [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
		   [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
	{ .name = "rt_sigaction",
	  .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_sigprocmask",
	  .arg = { [0] = STRARRAY(how, sighow), }, },
	{ .name = "rt_sigqueueinfo",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "rt_tgsigqueueinfo",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name = "sched_setscheduler",
	  .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, },
	{ .name = "seccomp",
	  .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ },
		   [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, },
	{ .name = "select", .timeout = true, },
	{ .name = "sendfile", .alias = "sendfile64", },
{ .name = "sendmmsg", 1122 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1123 { .name = "sendmsg", 1124 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1125 { .name = "sendto", 1126 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, 1127 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, }, 1128 { .name = "set_tid_address", .errpid = true, }, 1129 { .name = "setitimer", 1130 .arg = { [0] = STRARRAY(which, itimers), }, }, 1131 { .name = "setrlimit", 1132 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 1133 { .name = "setsockopt", 1134 .arg = { [1] = STRARRAY(level, socket_level), }, }, 1135 { .name = "socket", 1136 .arg = { [0] = STRARRAY(family, socket_families), 1137 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1138 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1139 { .name = "socketpair", 1140 .arg = { [0] = STRARRAY(family, socket_families), 1141 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1142 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1143 { .name = "stat", .alias = "newstat", }, 1144 { .name = "statx", 1145 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, 1146 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } , 1147 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, }, 1148 { .name = "swapoff", 1149 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, }, 1150 { .name = "swapon", 1151 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, }, 1152 { .name = "symlinkat", 1153 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1154 { .name = "sync_file_range", 1155 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, }, 1156 { .name = "tgkill", 1157 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1158 { .name = "tkill", 1159 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1160 { .name = "umount2", .alias = "umount", 1161 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, }, 1162 { .name = "uname", .alias = "newuname", }, 1163 { .name = "unlinkat", 1164 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1165 { .name = "utimensat", 1166 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, }, 1167 { .name = "wait4", .errpid = true, 1168 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1169 { .name = "waitid", .errpid = true, 1170 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1171 }; 1172 1173 static int syscall_fmt__cmp(const void *name, const void *fmtp) 1174 { 1175 const struct syscall_fmt *fmt = fmtp; 1176 return strcmp(name, fmt->name); 1177 } 1178 1179 static const struct syscall_fmt *__syscall_fmt__find(const struct syscall_fmt *fmts, 1180 const int nmemb, 1181 const char *name) 1182 { 1183 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); 1184 } 1185 1186 static const struct syscall_fmt *syscall_fmt__find(const char *name) 1187 { 1188 const int nmemb = ARRAY_SIZE(syscall_fmts); 1189 return __syscall_fmt__find(syscall_fmts, nmemb, name); 1190 } 1191 1192 static const struct syscall_fmt *__syscall_fmt__find_by_alias(const struct syscall_fmt *fmts, 1193 const int nmemb, const char *alias) 1194 { 1195 int i; 1196 1197 for (i = 0; i < nmemb; ++i) { 1198 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0) 1199 return &fmts[i]; 1200 } 1201 1202 return NULL; 1203 } 1204 1205 static const struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) 1206 { 1207 const int nmemb = 
/*
 * is_exit:	is this "exit" or "exit_group"?
 * is_open:	is this "open" or "openat"? To associate the fd returned in
 *		sys_exit with the pathname in sys_enter.
 * args_size:	sum of the sizes of the syscall arguments, anything after
 *		that is augmented stuff: pathname for openat, etc.
 * nonexistent:	just a hole in the syscall table, syscall id not allocated.
 */
struct syscall {
	struct tep_event *tp_format;
	int nr_args;
	int args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	} bpf_prog;
	bool is_exit;
	bool is_open;
	bool nonexistent;
	struct tep_format_field *args;
	const char *name;
	const struct syscall_fmt *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know what is the duration of a syscall, for instance, when we start
 * a session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, "         ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}
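/*
 * Example: a 2.5ms syscall is printed as "( 2.500 ms): " in red, a 50us one
 * as "( 0.050 ms): " in yellow, and a syscall that was already in progress
 * when the session started, i.e. with no duration to calculate, as
 * "(         ): ".
 */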
/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
 */
struct thread_trace {
	u64 entry_time;
	bool entry_pending;
	unsigned long nr_events;
	unsigned long pfmaj, pfmin;
	char *entry_str;
	double runtime_ms;
	size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int entry_str_pos;
		bool pending_open;
		unsigned int namelen;
		char *name;
	} filename;
	struct {
		int max;
		struct file *table;
	} files;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static void thread_trace__free_files(struct thread_trace *ttrace);

static void thread_trace__delete(void *pttrace)
{
	struct thread_trace *ttrace = pttrace;

	if (!ttrace)
		return;

	intlist__delete(ttrace->syscall_stats);
	ttrace->syscall_stats = NULL;
	thread_trace__free_files(ttrace);
	zfree(&ttrace->entry_str);
	free(ttrace);
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}

void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static void thread_trace__free_files(struct thread_trace *ttrace)
{
	/* files.max is the highest valid index, so iterate up to and including it */
	for (int i = 0; i <= ttrace->files.max; ++i) {
		struct file *file = ttrace->files.table + i;

		zfree(&file->pathname);
	}

	zfree(&ttrace->files.table);
	ttrace->files.max = -1;
}

static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}
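/*
 * Growth policy illustrated: if the largest fd seen so far was 3 and fd 10
 * shows up, thread_trace__files_entry() realloc()s the table to 11 entries,
 * zeroes the new [4..10] range and sets files.max = 10, keeping the table
 * directly indexable by fd.
 */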
static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;

		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread__pid(thread) == thread__tid(thread)) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread__pid(thread), fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d",
			  thread__pid(thread), thread__tid(thread), fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}
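/*
 * Example: with path resolution available (live session, table populated or
 * /proc readable), an fd argument renders as "3</etc/passwd>" instead of a
 * bare "3"; syscall_arg__scnprintf_close_fd() additionally drops the cached
 * pathname, since after close() the fd number may be reused for another file.
 */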
static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg,
	 * i.e. for rename* syscalls we would have two strings, each prefixed
	 * by its size.
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before the tracing
 * session starts, or a sys_enter lost to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, "         ? ");
}

static pid_t workload_pid = -1;
static volatile sig_atomic_t done = false;
static volatile sig_atomic_t interrupted = false;

static void sighandler_interrupt(int sig __maybe_unused)
{
	done = interrupted = true;
}

static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
			    void *context __maybe_unused)
{
	if (info->si_pid == workload_pid)
		done = true;
}

static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread__tid(thread));
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
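/*
 * Putting the helpers above together, a typical line prefix looks roughly
 * like:
 *
 *	   347.708 ( 0.003 ms): cat/123456 ...
 *
 * i.e. timestamp relative to trace start, duration, then comm/tid when more
 * than one thread is being traced.
 */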
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);

	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	thread__set_priv_destructor(thread_trace__delete);

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process,
					    true, false, 1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}

static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static const struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr",    .scnprintf = SCA_X86_MSR,         .strtoul = STUL_X86_MSR, },
	{ .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};

static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;

	return strcmp(name, fmt->name);
}

static const struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(const struct syscall_arg_fmt *fmts, const int nmemb,
				const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static const struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);

	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}
"path") != NULL)) 1750 arg->scnprintf = SCA_FILENAME; 1751 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) 1752 arg->scnprintf = SCA_PTR; 1753 else if (strcmp(field->type, "pid_t") == 0) 1754 arg->scnprintf = SCA_PID; 1755 else if (strcmp(field->type, "umode_t") == 0) 1756 arg->scnprintf = SCA_MODE_T; 1757 else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) { 1758 arg->scnprintf = SCA_CHAR_ARRAY; 1759 arg->nr_entries = field->arraylen; 1760 } else if ((strcmp(field->type, "int") == 0 || 1761 strcmp(field->type, "unsigned int") == 0 || 1762 strcmp(field->type, "long") == 0) && 1763 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { 1764 /* 1765 * /sys/kernel/tracing/events/syscalls/sys_enter* 1766 * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c 1767 * 65 int 1768 * 23 unsigned int 1769 * 7 unsigned long 1770 */ 1771 arg->scnprintf = SCA_FD; 1772 } else { 1773 const struct syscall_arg_fmt *fmt = 1774 syscall_arg_fmt__find_by_name(field->name); 1775 1776 if (fmt) { 1777 arg->scnprintf = fmt->scnprintf; 1778 arg->strtoul = fmt->strtoul; 1779 } 1780 } 1781 } 1782 1783 return last_field; 1784 } 1785 1786 static int syscall__set_arg_fmts(struct syscall *sc) 1787 { 1788 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args); 1789 1790 if (last_field) 1791 sc->args_size = last_field->offset + last_field->size; 1792 1793 return 0; 1794 } 1795 1796 static int trace__read_syscall_info(struct trace *trace, int id) 1797 { 1798 char tp_name[128]; 1799 struct syscall *sc; 1800 const char *name = syscalltbl__name(trace->sctbl, id); 1801 1802 #ifdef HAVE_SYSCALL_TABLE_SUPPORT 1803 if (trace->syscalls.table == NULL) { 1804 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); 1805 if (trace->syscalls.table == NULL) 1806 return -ENOMEM; 1807 } 1808 #else 1809 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { 1810 // When using libaudit we don't know beforehand what is the max syscall id 1811 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); 1812 1813 if (table == NULL) 1814 return -ENOMEM; 1815 1816 // Need to memset from offset 0 and +1 members if brand new 1817 if (trace->syscalls.table == NULL) 1818 memset(table, 0, (id + 1) * sizeof(*sc)); 1819 else 1820 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc)); 1821 1822 trace->syscalls.table = table; 1823 trace->sctbl->syscalls.max_id = id; 1824 } 1825 #endif 1826 sc = trace->syscalls.table + id; 1827 if (sc->nonexistent) 1828 return -EEXIST; 1829 1830 if (name == NULL) { 1831 sc->nonexistent = true; 1832 return -EEXIST; 1833 } 1834 1835 sc->name = name; 1836 sc->fmt = syscall_fmt__find(sc->name); 1837 1838 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); 1839 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1840 1841 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { 1842 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); 1843 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1844 } 1845 1846 /* 1847 * Fails to read trace point format via sysfs node, so the trace point 1848 * doesn't exist. Set the 'nonexistent' flag as true. 1849 */ 1850 if (IS_ERR(sc->tp_format)) { 1851 sc->nonexistent = true; 1852 return PTR_ERR(sc->tp_format); 1853 } 1854 1855 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 
1856 RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields)) 1857 return -ENOMEM; 1858 1859 sc->args = sc->tp_format->format.fields; 1860 /* 1861 * The first field is the syscall number ('__syscall_nr', or just 1862 * 'nr' on older kernels). It is redundant here, so check for it 1863 * and drop it from the argument list when present. 1864 */ 1865 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { 1866 sc->args = sc->args->next; 1867 --sc->nr_args; 1868 } 1869 1870 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); 1871 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); 1872 1873 return syscall__set_arg_fmts(sc); 1874 } 1875 1876 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel) 1877 { 1878 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 1879 1880 if (fmt != NULL) { 1881 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields); 1882 return 0; 1883 } 1884 1885 return -ENOMEM; 1886 } 1887 1888 static int intcmp(const void *a, const void *b) 1889 { 1890 const int *one = a, *another = b; 1891 1892 return *one - *another; 1893 } 1894 1895 static int trace__validate_ev_qualifier(struct trace *trace) 1896 { 1897 int err = 0; 1898 bool printed_invalid_prefix = false; 1899 struct str_node *pos; 1900 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); 1901 1902 trace->ev_qualifier_ids.entries = malloc(nr_allocated * 1903 sizeof(trace->ev_qualifier_ids.entries[0])); 1904 1905 if (trace->ev_qualifier_ids.entries == NULL) { 1906 fputs("Error:\tNot enough memory for allocating events qualifier ids\n", 1907 trace->output); 1908 err = -EINVAL; 1909 goto out; 1910 } 1911 1912 strlist__for_each_entry(pos, trace->ev_qualifier) { 1913 const char *sc = pos->s; 1914 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; 1915 1916 if (id < 0) { 1917 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); 1918 if (id >= 0) 1919 goto matches; 1920 1921 if (!printed_invalid_prefix) { 1922 pr_debug("Skipping unknown syscalls: "); 1923 printed_invalid_prefix = true; 1924 } else { 1925 pr_debug(", "); 1926 } 1927 1928 pr_debug("%s", sc); 1929 continue; 1930 } 1931 matches: 1932 trace->ev_qualifier_ids.entries[nr_used++] = id; 1933 if (match_next == -1) 1934 continue; 1935 1936 while (1) { 1937 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); 1938 if (id < 0) 1939 break; 1940 if (nr_allocated == nr_used) { 1941 void *entries; 1942 1943 nr_allocated += 8; 1944 entries = realloc(trace->ev_qualifier_ids.entries, 1945 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); 1946 if (entries == NULL) { 1947 err = -ENOMEM; 1948 fputs("\nError:\t Not enough memory for parsing\n", trace->output); 1949 goto out_free; 1950 } 1951 trace->ev_qualifier_ids.entries = entries; 1952 } 1953 trace->ev_qualifier_ids.entries[nr_used++] = id; 1954 } 1955 } 1956 1957 trace->ev_qualifier_ids.nr = nr_used; 1958 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); 1959 out: 1960 if (printed_invalid_prefix) 1961 pr_debug("\n"); 1962 return err; 1963 out_free: 1964 zfree(&trace->ev_qualifier_ids.entries); 1965 trace->ev_qualifier_ids.nr = 0; 1966 goto out; 1967 } 1968 1969 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) 1970 { 1971 bool in_ev_qualifier; 1972 1973 if (trace->ev_qualifier_ids.nr == 0) 1974 return true; 1975 1976 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, 1977
trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; 1978 1979 if (in_ev_qualifier) 1980 return !trace->not_ev_qualifier; 1981 1982 return trace->not_ev_qualifier; 1983 } 1984 1985 /* 1986 * args is to be interpreted as a series of longs but we need to handle 1987 * 8-byte unaligned accesses. args points to raw_data within the event 1988 * and raw_data is not guaranteed to be 8-byte aligned because it is 1989 * preceded by raw_size which is a u32. So we need to copy args to a temp 1990 * variable to read it. Most notably this avoids extended load instructions 1991 * on unaligned addresses. 1992 */ 1993 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx) 1994 { 1995 unsigned long val; 1996 unsigned char *p = arg->args + sizeof(unsigned long) * idx; 1997 1998 memcpy(&val, p, sizeof(val)); 1999 return val; 2000 } 2001 2002 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, 2003 struct syscall_arg *arg) 2004 { 2005 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) 2006 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); 2007 2008 return scnprintf(bf, size, "arg%d: ", arg->idx); 2009 } 2010 2011 /* 2012 * Check if the value is in fact zero, i.e. mask whatever needs masking, such 2013 * as mount 'flags' argument that needs ignoring some magic flag, see comment 2014 * in tools/perf/trace/beauty/mount_flags.c 2015 */ 2016 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) 2017 { 2018 if (fmt && fmt->mask_val) 2019 return fmt->mask_val(arg, val); 2020 2021 return val; 2022 } 2023 2024 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, 2025 struct syscall_arg *arg, unsigned long val) 2026 { 2027 if (fmt && fmt->scnprintf) { 2028 arg->val = val; 2029 if (fmt->parm) 2030 arg->parm = fmt->parm; 2031 return fmt->scnprintf(bf, size, arg); 2032 } 2033 return scnprintf(bf, size, "%ld", val); 2034 } 2035 2036 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, 2037 unsigned char *args, void *augmented_args, int augmented_args_size, 2038 struct trace *trace, struct thread *thread) 2039 { 2040 size_t printed = 0; 2041 unsigned long val; 2042 u8 bit = 1; 2043 struct syscall_arg arg = { 2044 .args = args, 2045 .augmented = { 2046 .size = augmented_args_size, 2047 .args = augmented_args, 2048 }, 2049 .idx = 0, 2050 .mask = 0, 2051 .trace = trace, 2052 .thread = thread, 2053 .show_string_prefix = trace->show_string_prefix, 2054 }; 2055 struct thread_trace *ttrace = thread__priv(thread); 2056 2057 /* 2058 * Things like fcntl will set this in its 'cmd' formatter to pick the 2059 * right formatter for the return value (an fd? file flags?), which is 2060 * not needed for syscalls that always return a given type, say an fd. 2061 */ 2062 ttrace->ret_scnprintf = NULL; 2063 2064 if (sc->args != NULL) { 2065 struct tep_format_field *field; 2066 2067 for (field = sc->args; field; 2068 field = field->next, ++arg.idx, bit <<= 1) { 2069 if (arg.mask & bit) 2070 continue; 2071 2072 arg.fmt = &sc->arg_fmt[arg.idx]; 2073 val = syscall_arg__val(&arg, arg.idx); 2074 /* 2075 * Some syscall args need some mask, most don't and 2076 * return val untouched. 2077 */ 2078 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2079 2080 /* 2081 * Suppress this argument if its value is zero and 2082 * we don't have a string associated in an 2083 * strarray for it.
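 * For instance, a zero 'flags' argument is usually just noise, but a
 * zero strarray-backed argument like lseek's 'whence' still names a
 * real value (SEEK_SET == 0) and has to be printed.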
2084 */ 2085 if (val == 0 && 2086 !trace->show_zeros && 2087 !(sc->arg_fmt && 2088 (sc->arg_fmt[arg.idx].show_zero || 2089 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY || 2090 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) && 2091 sc->arg_fmt[arg.idx].parm)) 2092 continue; 2093 2094 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2095 2096 if (trace->show_arg_names) 2097 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2098 2099 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2100 bf + printed, size - printed, &arg, val); 2101 } 2102 } else if (IS_ERR(sc->tp_format)) { 2103 /* 2104 * If we managed to read the tracepoint /format file, then we 2105 * may end up not having any args, like with gettid(), so only 2106 * print the raw args when we didn't manage to read it. 2107 */ 2108 while (arg.idx < sc->nr_args) { 2109 if (arg.mask & bit) 2110 goto next_arg; 2111 val = syscall_arg__val(&arg, arg.idx); 2112 if (printed) 2113 printed += scnprintf(bf + printed, size - printed, ", "); 2114 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2115 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2116 next_arg: 2117 ++arg.idx; 2118 bit <<= 1; 2119 } 2120 } 2121 2122 return printed; 2123 } 2124 2125 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2126 union perf_event *event, 2127 struct perf_sample *sample); 2128 2129 static struct syscall *trace__syscall_info(struct trace *trace, 2130 struct evsel *evsel, int id) 2131 { 2132 int err = 0; 2133 2134 if (id < 0) { 2135 2136 /* 2137 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2138 * before that, leaving at a higher verbosity level till that is 2139 * explained. Reproduced with plain ftrace with: 2140 * 2141 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2142 * grep "NR -1 " /t/trace_pipe 2143 * 2144 * After generating some load on the machine. 2145 */ 2146 if (verbose > 1) { 2147 static u64 n; 2148 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2149 id, evsel__name(evsel), ++n); 2150 } 2151 return NULL; 2152 } 2153 2154 err = -EINVAL; 2155 2156 #ifdef HAVE_SYSCALL_TABLE_SUPPORT 2157 if (id > trace->sctbl->syscalls.max_id) { 2158 #else 2159 if (id >= trace->sctbl->syscalls.max_id) { 2160 /* 2161 * With libaudit we don't know beforehand what is the max_id, 2162 * so we let trace__read_syscall_info() figure that out as we 2163 * go on reading syscalls. 
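 * (trace->syscalls.table is then realloc'ed and trace->sctbl->syscalls.max_id
 * bumped as new ids show up, see trace__read_syscall_info() above)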
2164 */ 2165 err = trace__read_syscall_info(trace, id); 2166 if (err) 2167 #endif 2168 goto out_cant_read; 2169 } 2170 2171 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2172 (err = trace__read_syscall_info(trace, id)) != 0) 2173 goto out_cant_read; 2174 2175 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent) 2176 goto out_cant_read; 2177 2178 return &trace->syscalls.table[id]; 2179 2180 out_cant_read: 2181 if (verbose > 0) { 2182 char sbuf[STRERR_BUFSIZE]; 2183 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2184 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2185 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2186 fputs(" information\n", trace->output); 2187 } 2188 return NULL; 2189 } 2190 2191 struct syscall_stats { 2192 struct stats stats; 2193 u64 nr_failures; 2194 int max_errno; 2195 u32 *errnos; 2196 }; 2197 2198 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2199 int id, struct perf_sample *sample, long err, bool errno_summary) 2200 { 2201 struct int_node *inode; 2202 struct syscall_stats *stats; 2203 u64 duration = 0; 2204 2205 inode = intlist__findnew(ttrace->syscall_stats, id); 2206 if (inode == NULL) 2207 return; 2208 2209 stats = inode->priv; 2210 if (stats == NULL) { 2211 stats = zalloc(sizeof(*stats)); 2212 if (stats == NULL) 2213 return; 2214 2215 init_stats(&stats->stats); 2216 inode->priv = stats; 2217 } 2218 2219 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2220 duration = sample->time - ttrace->entry_time; 2221 2222 update_stats(&stats->stats, duration); 2223 2224 if (err < 0) { 2225 ++stats->nr_failures; 2226 2227 if (!errno_summary) 2228 return; 2229 2230 err = -err; 2231 if (err > stats->max_errno) { 2232 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2233 2234 if (new_errnos) { 2235 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2236 } else { 2237 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2238 thread__comm_str(thread), thread__pid(thread), 2239 thread__tid(thread)); 2240 return; 2241 } 2242 2243 stats->errnos = new_errnos; 2244 stats->max_errno = err; 2245 } 2246 2247 ++stats->errnos[err - 1]; 2248 } 2249 } 2250 2251 static int trace__printf_interrupted_entry(struct trace *trace) 2252 { 2253 struct thread_trace *ttrace; 2254 size_t printed; 2255 int len; 2256 2257 if (trace->failure_only || trace->current == NULL) 2258 return 0; 2259 2260 ttrace = thread__priv(trace->current); 2261 2262 if (!ttrace->entry_pending) 2263 return 0; 2264 2265 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2266 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2267 2268 if (len < trace->args_alignment - 4) 2269 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2270 2271 printed += fprintf(trace->output, " ...\n"); 2272 2273 ttrace->entry_pending = false; 2274 ++trace->nr_events_printed; 2275 2276 return printed; 2277 } 2278 2279 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2280 struct perf_sample *sample, struct thread *thread) 2281 { 2282 int printed = 0; 2283 2284 if (trace->print_sample) { 2285 double ts = (double)sample->time / NSEC_PER_MSEC; 2286 2287 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", 2288 
evsel__name(evsel), ts, 2289 thread__comm_str(thread), 2290 sample->pid, sample->tid, sample->cpu); 2291 } 2292 2293 return printed; 2294 } 2295 2296 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size) 2297 { 2298 void *augmented_args = NULL; 2299 /* 2300 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter 2301 * and there we get all 6 syscall args plus the tracepoint common fields 2302 * that get calculated at the start and the syscall_nr (another long). 2303 * So we check if that is the case and, if so, look for the augmented args 2304 * after the full raw_syscalls:sys_enter payload, which is fixed, instead 2305 * of after sc->args_size. 2306 * 2307 * We'll revisit this later to pass sc->args_size to the BPF augmenter 2308 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it 2309 * copies only what we need for each syscall, like what happens when we 2310 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace 2311 * traffic to just what is needed for each syscall. 2312 */ 2313 int args_size = raw_augmented_args_size ?: sc->args_size; 2314 2315 *augmented_args_size = sample->raw_size - args_size; 2316 if (*augmented_args_size > 0) 2317 augmented_args = sample->raw_data + args_size; 2318 2319 return augmented_args; 2320 } 2321 2322 static void syscall__exit(struct syscall *sc) 2323 { 2324 if (!sc) 2325 return; 2326 2327 zfree(&sc->arg_fmt); 2328 } 2329 2330 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, 2331 union perf_event *event __maybe_unused, 2332 struct perf_sample *sample) 2333 { 2334 char *msg; 2335 void *args; 2336 int printed = 0; 2337 struct thread *thread; 2338 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2339 int augmented_args_size = 0; 2340 void *augmented_args = NULL; 2341 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2342 struct thread_trace *ttrace; 2343 2344 if (sc == NULL) 2345 return -1; 2346 2347 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2348 ttrace = thread__trace(thread, trace->output); 2349 if (ttrace == NULL) 2350 goto out_put; 2351 2352 trace__fprintf_sample(trace, evsel, sample, thread); 2353 2354 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2355 2356 if (ttrace->entry_str == NULL) { 2357 ttrace->entry_str = malloc(trace__entry_str_size); 2358 if (!ttrace->entry_str) 2359 goto out_put; 2360 } 2361 2362 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) 2363 trace__printf_interrupted_entry(trace); 2364 /* 2365 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible 2366 * arguments, even if the syscall being handled, say "openat", uses only 4. 2367 * That breaks the syscall__augmented_args() check for augmented args, as we 2368 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs 2369 * format file: when handling, say, the openat syscall we would get 6 args for 2370 * the raw_syscalls:sys_enter event when we expected just 4, and would mistakenly 2371 * take the extra 2 u64 args for the augmented filename. So just check here and 2372 * avoid using augmented syscalls when the evsel is the raw_syscalls one.
2373 */ 2374 if (evsel != trace->syscalls.events.sys_enter) 2375 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2376 ttrace->entry_time = sample->time; 2377 msg = ttrace->entry_str; 2378 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); 2379 2380 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, 2381 args, augmented_args, augmented_args_size, trace, thread); 2382 2383 if (sc->is_exit) { 2384 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { 2385 int alignment = 0; 2386 2387 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); 2388 printed = fprintf(trace->output, "%s)", ttrace->entry_str); 2389 if (trace->args_alignment > printed) 2390 alignment = trace->args_alignment - printed; 2391 fprintf(trace->output, "%*s= ?\n", alignment, " "); 2392 } 2393 } else { 2394 ttrace->entry_pending = true; 2395 /* See trace__vfs_getname & trace__sys_exit */ 2396 ttrace->filename.pending_open = false; 2397 } 2398 2399 if (trace->current != thread) { 2400 thread__put(trace->current); 2401 trace->current = thread__get(thread); 2402 } 2403 err = 0; 2404 out_put: 2405 thread__put(thread); 2406 return err; 2407 } 2408 2409 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, 2410 struct perf_sample *sample) 2411 { 2412 struct thread_trace *ttrace; 2413 struct thread *thread; 2414 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2415 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2416 char msg[1024]; 2417 void *args, *augmented_args = NULL; 2418 int augmented_args_size; 2419 2420 if (sc == NULL) 2421 return -1; 2422 2423 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2424 ttrace = thread__trace(thread, trace->output); 2425 /* 2426 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args() 2427 * and the rest of the beautifiers accessing it via struct syscall_arg touches it. 2428 */ 2429 if (ttrace == NULL) 2430 goto out_put; 2431 2432 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2433 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2434 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); 2435 fprintf(trace->output, "%s", msg); 2436 err = 0; 2437 out_put: 2438 thread__put(thread); 2439 return err; 2440 } 2441 2442 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, 2443 struct perf_sample *sample, 2444 struct callchain_cursor *cursor) 2445 { 2446 struct addr_location al; 2447 int max_stack = evsel->core.attr.sample_max_stack ? 
2448 evsel->core.attr.sample_max_stack : 2449 trace->max_stack; 2450 int err = -1; 2451 2452 addr_location__init(&al); 2453 if (machine__resolve(trace->host, &al, sample) < 0) 2454 goto out; 2455 2456 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2457 out: 2458 addr_location__exit(&al); 2459 return err; 2460 } 2461 2462 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2463 { 2464 /* TODO: user-configurable print_opts */ 2465 const unsigned int print_opts = EVSEL__PRINT_SYM | 2466 EVSEL__PRINT_DSO | 2467 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2468 2469 return sample__fprintf_callchain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output); 2470 } 2471 2472 static const char *errno_to_name(struct evsel *evsel, int err) 2473 { 2474 struct perf_env *env = evsel__env(evsel); 2475 const char *arch_name = perf_env__arch(env); 2476 2477 return arch_syscalls__strerrno(arch_name, err); 2478 } 2479 2480 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2481 union perf_event *event __maybe_unused, 2482 struct perf_sample *sample) 2483 { 2484 long ret; 2485 u64 duration = 0; 2486 bool duration_calculated = false; 2487 struct thread *thread; 2488 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2489 int alignment = trace->args_alignment; 2490 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2491 struct thread_trace *ttrace; 2492 2493 if (sc == NULL) 2494 return -1; 2495 2496 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2497 ttrace = thread__trace(thread, trace->output); 2498 if (ttrace == NULL) 2499 goto out_put; 2500 2501 trace__fprintf_sample(trace, evsel, sample, thread); 2502 2503 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2504 2505 if (trace->summary) 2506 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); 2507 2508 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2509 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2510 ttrace->filename.pending_open = false; 2511 ++trace->stats.vfs_getname; 2512 } 2513 2514 if (ttrace->entry_time) { 2515 duration = sample->time - ttrace->entry_time; 2516 if (trace__filter_duration(trace, duration)) 2517 goto out; 2518 duration_calculated = true; 2519 } else if (trace->duration_filter) 2520 goto out; 2521 2522 if (sample->callchain) { 2523 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2524 2525 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2526 if (callchain_ret == 0) { 2527 if (cursor->nr < trace->min_stack) 2528 goto out; 2529 callchain_ret = 1; 2530 } 2531 } 2532 2533 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2534 goto out; 2535 2536 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2537 2538 if (ttrace->entry_pending) { 2539 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2540 } else { 2541 printed += fprintf(trace->output, " ... 
["); 2542 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2543 printed += 9; 2544 printed += fprintf(trace->output, "]: %s()", sc->name); 2545 } 2546 2547 printed++; /* the closing ')' */ 2548 2549 if (alignment > printed) 2550 alignment -= printed; 2551 else 2552 alignment = 0; 2553 2554 fprintf(trace->output, ")%*s= ", alignment, " "); 2555 2556 if (sc->fmt == NULL) { 2557 if (ret < 0) 2558 goto errno_print; 2559 signed_print: 2560 fprintf(trace->output, "%ld", ret); 2561 } else if (ret < 0) { 2562 errno_print: { 2563 char bf[STRERR_BUFSIZE]; 2564 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2565 *e = errno_to_name(evsel, -ret); 2566 2567 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2568 } 2569 } else if (ret == 0 && sc->fmt->timeout) 2570 fprintf(trace->output, "0 (Timeout)"); 2571 else if (ttrace->ret_scnprintf) { 2572 char bf[1024]; 2573 struct syscall_arg arg = { 2574 .val = ret, 2575 .thread = thread, 2576 .trace = trace, 2577 }; 2578 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2579 ttrace->ret_scnprintf = NULL; 2580 fprintf(trace->output, "%s", bf); 2581 } else if (sc->fmt->hexret) 2582 fprintf(trace->output, "%#lx", ret); 2583 else if (sc->fmt->errpid) { 2584 struct thread *child = machine__find_thread(trace->host, ret, ret); 2585 2586 if (child != NULL) { 2587 fprintf(trace->output, "%ld", ret); 2588 if (thread__comm_set(child)) 2589 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2590 thread__put(child); 2591 } 2592 } else 2593 goto signed_print; 2594 2595 fputc('\n', trace->output); 2596 2597 /* 2598 * We only consider an 'event' for the sake of --max-events a non-filtered 2599 * sys_enter + sys_exit and other tracepoint events. 2600 */ 2601 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2602 interrupted = true; 2603 2604 if (callchain_ret > 0) 2605 trace__fprintf_callchain(trace, sample); 2606 else if (callchain_ret < 0) 2607 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2608 out: 2609 ttrace->entry_pending = false; 2610 err = 0; 2611 out_put: 2612 thread__put(thread); 2613 return err; 2614 } 2615 2616 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2617 union perf_event *event __maybe_unused, 2618 struct perf_sample *sample) 2619 { 2620 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2621 struct thread_trace *ttrace; 2622 size_t filename_len, entry_str_len, to_move; 2623 ssize_t remaining_space; 2624 char *pos; 2625 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 2626 2627 if (!thread) 2628 goto out; 2629 2630 ttrace = thread__priv(thread); 2631 if (!ttrace) 2632 goto out_put; 2633 2634 filename_len = strlen(filename); 2635 if (filename_len == 0) 2636 goto out_put; 2637 2638 if (ttrace->filename.namelen < filename_len) { 2639 char *f = realloc(ttrace->filename.name, filename_len + 1); 2640 2641 if (f == NULL) 2642 goto out_put; 2643 2644 ttrace->filename.namelen = filename_len; 2645 ttrace->filename.name = f; 2646 } 2647 2648 strcpy(ttrace->filename.name, filename); 2649 ttrace->filename.pending_open = true; 2650 2651 if (!ttrace->filename.ptr) 2652 goto out_put; 2653 2654 entry_str_len = strlen(ttrace->entry_str); 2655 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2656 if (remaining_space <= 0) 2657 goto out_put; 2658 2659 if (filename_len > (size_t)remaining_space) { 2660 filename += filename_len - remaining_space; 2661 filename_len = remaining_space; 2662 } 2663 
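/*
 * A rough sketch of the splice below, assuming entry_str currently holds
 * "openat(AT_FDCWD, , O_RDONLY" with entry_str_pos at the empty slot:
 * memmove() opens a filename_len sized gap at that position (dragging the
 * trailing ", O_RDONLY" and its NUL along) and memcpy() then drops the
 * pathname into it, yielding "openat(AT_FDCWD, /etc/passwd, O_RDONLY"
 * (the "/etc/passwd" pathname is just an illustrative value).
 */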
2664 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 2665 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 2666 memmove(pos + filename_len, pos, to_move); 2667 memcpy(pos, filename, filename_len); 2668 2669 ttrace->filename.ptr = 0; 2670 ttrace->filename.entry_str_pos = 0; 2671 out_put: 2672 thread__put(thread); 2673 out: 2674 return 0; 2675 } 2676 2677 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 2678 union perf_event *event __maybe_unused, 2679 struct perf_sample *sample) 2680 { 2681 u64 runtime = evsel__intval(evsel, sample, "runtime"); 2682 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 2683 struct thread *thread = machine__findnew_thread(trace->host, 2684 sample->pid, 2685 sample->tid); 2686 struct thread_trace *ttrace = thread__trace(thread, trace->output); 2687 2688 if (ttrace == NULL) 2689 goto out_dump; 2690 2691 ttrace->runtime_ms += runtime_ms; 2692 trace->runtime_ms += runtime_ms; 2693 out_put: 2694 thread__put(thread); 2695 return 0; 2696 2697 out_dump: 2698 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 2699 evsel->name, 2700 evsel__strval(evsel, sample, "comm"), 2701 (pid_t)evsel__intval(evsel, sample, "pid"), 2702 runtime, 2703 evsel__intval(evsel, sample, "vruntime")); 2704 goto out_put; 2705 } 2706 2707 static int bpf_output__printer(enum binary_printer_ops op, 2708 unsigned int val, void *extra __maybe_unused, FILE *fp) 2709 { 2710 unsigned char ch = (unsigned char)val; 2711 2712 switch (op) { 2713 case BINARY_PRINT_CHAR_DATA: 2714 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 2715 case BINARY_PRINT_DATA_BEGIN: 2716 case BINARY_PRINT_LINE_BEGIN: 2717 case BINARY_PRINT_ADDR: 2718 case BINARY_PRINT_NUM_DATA: 2719 case BINARY_PRINT_NUM_PAD: 2720 case BINARY_PRINT_SEP: 2721 case BINARY_PRINT_CHAR_PAD: 2722 case BINARY_PRINT_LINE_END: 2723 case BINARY_PRINT_DATA_END: 2724 default: 2725 break; 2726 } 2727 2728 return 0; 2729 } 2730 2731 static void bpf_output__fprintf(struct trace *trace, 2732 struct perf_sample *sample) 2733 { 2734 binary__fprintf(sample->raw_data, sample->raw_size, 8, 2735 bpf_output__printer, NULL, trace->output); 2736 ++trace->nr_events_printed; 2737 } 2738 2739 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 2740 struct thread *thread, void *augmented_args, int augmented_args_size) 2741 { 2742 char bf[2048]; 2743 size_t size = sizeof(bf); 2744 struct tep_format_field *field = evsel->tp_format->format.fields; 2745 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 2746 size_t printed = 0; 2747 unsigned long val; 2748 u8 bit = 1; 2749 struct syscall_arg syscall_arg = { 2750 .augmented = { 2751 .size = augmented_args_size, 2752 .args = augmented_args, 2753 }, 2754 .idx = 0, 2755 .mask = 0, 2756 .trace = trace, 2757 .thread = thread, 2758 .show_string_prefix = trace->show_string_prefix, 2759 }; 2760 2761 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 2762 if (syscall_arg.mask & bit) 2763 continue; 2764 2765 syscall_arg.len = 0; 2766 syscall_arg.fmt = arg; 2767 if (field->flags & TEP_FIELD_IS_ARRAY) { 2768 int offset = field->offset; 2769 2770 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2771 offset = format_field__intval(field, sample, evsel->needs_swap); 2772 syscall_arg.len = offset >> 16; 2773 offset &= 0xffff; 2774 if (tep_field_is_relative(field->flags)) 2775 offset += field->offset + field->size; 2776 } 2777 2778 val = 
(uintptr_t)(sample->raw_data + offset); 2779 } else 2780 val = format_field__intval(field, sample, evsel->needs_swap); 2781 /* 2782 * Some syscall args need some mask, most don't and 2783 * return val untouched. 2784 */ 2785 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 2786 2787 /* 2788 * Suppress this argument if its value is zero and 2789 * we don't have a string associated in an 2790 * strarray for it. 2791 */ 2792 if (val == 0 && 2793 !trace->show_zeros && 2794 !((arg->show_zero || 2795 arg->scnprintf == SCA_STRARRAY || 2796 arg->scnprintf == SCA_STRARRAYS) && 2797 arg->parm)) 2798 continue; 2799 2800 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2801 2802 if (trace->show_arg_names) 2803 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2804 2805 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 2806 } 2807 2808 return printed + fprintf(trace->output, "%s", bf); 2809 } 2810 2811 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 2812 union perf_event *event __maybe_unused, 2813 struct perf_sample *sample) 2814 { 2815 struct thread *thread; 2816 int callchain_ret = 0; 2817 /* 2818 * Check if we called perf_evsel__disable(evsel) due to, for instance, 2819 * this event's max_events having been hit and this is an entry coming 2820 * from the ring buffer that we should discard, since the max events 2821 * have already been considered/printed. 2822 */ 2823 if (evsel->disabled) 2824 return 0; 2825 2826 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2827 2828 if (sample->callchain) { 2829 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2830 2831 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2832 if (callchain_ret == 0) { 2833 if (cursor->nr < trace->min_stack) 2834 goto out; 2835 callchain_ret = 1; 2836 } 2837 } 2838 2839 trace__printf_interrupted_entry(trace); 2840 trace__fprintf_tstamp(trace, sample->time, trace->output); 2841 2842 if (trace->trace_syscalls && trace->show_duration) 2843 fprintf(trace->output, "( ): "); 2844 2845 if (thread) 2846 trace__fprintf_comm_tid(trace, thread, trace->output); 2847 2848 if (evsel == trace->syscalls.events.augmented) { 2849 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 2850 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2851 2852 if (sc) { 2853 fprintf(trace->output, "%s(", sc->name); 2854 trace__fprintf_sys_enter(trace, evsel, sample); 2855 fputc(')', trace->output); 2856 goto newline; 2857 } 2858 2859 /* 2860 * XXX: Not having the associated syscall info or not finding/adding 2861 * the thread should never happen, but if it does... 2862 * fall thru and print it as a bpf_output event. 
2863 */ 2864 } 2865 2866 fprintf(trace->output, "%s(", evsel->name); 2867 2868 if (evsel__is_bpf_output(evsel)) { 2869 bpf_output__fprintf(trace, sample); 2870 } else if (evsel->tp_format) { 2871 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) || 2872 trace__fprintf_sys_enter(trace, evsel, sample)) { 2873 if (trace->libtraceevent_print) { 2874 event_format__fprintf(evsel->tp_format, sample->cpu, 2875 sample->raw_data, sample->raw_size, 2876 trace->output); 2877 } else { 2878 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 2879 } 2880 } 2881 } 2882 2883 newline: 2884 fprintf(trace->output, ")\n"); 2885 2886 if (callchain_ret > 0) 2887 trace__fprintf_callchain(trace, sample); 2888 else if (callchain_ret < 0) 2889 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2890 2891 ++trace->nr_events_printed; 2892 2893 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 2894 evsel__disable(evsel); 2895 evsel__close(evsel); 2896 } 2897 out: 2898 thread__put(thread); 2899 return 0; 2900 } 2901 2902 static void print_location(FILE *f, struct perf_sample *sample, 2903 struct addr_location *al, 2904 bool print_dso, bool print_sym) 2905 { 2906 2907 if ((verbose > 0 || print_dso) && al->map) 2908 fprintf(f, "%s@", map__dso(al->map)->long_name); 2909 2910 if ((verbose > 0 || print_sym) && al->sym) 2911 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 2912 al->addr - al->sym->start); 2913 else if (al->map) 2914 fprintf(f, "0x%" PRIx64, al->addr); 2915 else 2916 fprintf(f, "0x%" PRIx64, sample->addr); 2917 } 2918 2919 static int trace__pgfault(struct trace *trace, 2920 struct evsel *evsel, 2921 union perf_event *event __maybe_unused, 2922 struct perf_sample *sample) 2923 { 2924 struct thread *thread; 2925 struct addr_location al; 2926 char map_type = 'd'; 2927 struct thread_trace *ttrace; 2928 int err = -1; 2929 int callchain_ret = 0; 2930 2931 addr_location__init(&al); 2932 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2933 2934 if (sample->callchain) { 2935 struct callchain_cursor *cursor = get_tls_callchain_cursor(); 2936 2937 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor); 2938 if (callchain_ret == 0) { 2939 if (cursor->nr < trace->min_stack) 2940 goto out_put; 2941 callchain_ret = 1; 2942 } 2943 } 2944 2945 ttrace = thread__trace(thread, trace->output); 2946 if (ttrace == NULL) 2947 goto out_put; 2948 2949 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) 2950 ttrace->pfmaj++; 2951 else 2952 ttrace->pfmin++; 2953 2954 if (trace->summary_only) 2955 goto out; 2956 2957 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 2958 2959 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 2960 2961 fprintf(trace->output, "%sfault [", 2962 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
2963 "maj" : "min"); 2964 2965 print_location(trace->output, sample, &al, false, true); 2966 2967 fprintf(trace->output, "] => "); 2968 2969 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2970 2971 if (!al.map) { 2972 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2973 2974 if (al.map) 2975 map_type = 'x'; 2976 else 2977 map_type = '?'; 2978 } 2979 2980 print_location(trace->output, sample, &al, true, false); 2981 2982 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 2983 2984 if (callchain_ret > 0) 2985 trace__fprintf_callchain(trace, sample); 2986 else if (callchain_ret < 0) 2987 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2988 2989 ++trace->nr_events_printed; 2990 out: 2991 err = 0; 2992 out_put: 2993 thread__put(thread); 2994 addr_location__exit(&al); 2995 return err; 2996 } 2997 2998 static void trace__set_base_time(struct trace *trace, 2999 struct evsel *evsel, 3000 struct perf_sample *sample) 3001 { 3002 /* 3003 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 3004 * and don't use sample->time unconditionally, we may end up having 3005 * some other event in the future without PERF_SAMPLE_TIME for good 3006 * reason, i.e. we may not be interested in its timestamps, just in 3007 * it taking place, picking some piece of information when it 3008 * appears in our event stream (vfs_getname comes to mind). 3009 */ 3010 if (trace->base_time == 0 && !trace->full_time && 3011 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 3012 trace->base_time = sample->time; 3013 } 3014 3015 static int trace__process_sample(struct perf_tool *tool, 3016 union perf_event *event, 3017 struct perf_sample *sample, 3018 struct evsel *evsel, 3019 struct machine *machine __maybe_unused) 3020 { 3021 struct trace *trace = container_of(tool, struct trace, tool); 3022 struct thread *thread; 3023 int err = 0; 3024 3025 tracepoint_handler handler = evsel->handler; 3026 3027 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 3028 if (thread && thread__is_filtered(thread)) 3029 goto out; 3030 3031 trace__set_base_time(trace, evsel, sample); 3032 3033 if (handler) { 3034 ++trace->nr_events; 3035 handler(trace, evsel, event, sample); 3036 } 3037 out: 3038 thread__put(thread); 3039 return err; 3040 } 3041 3042 static int trace__record(struct trace *trace, int argc, const char **argv) 3043 { 3044 unsigned int rec_argc, i, j; 3045 const char **rec_argv; 3046 const char * const record_args[] = { 3047 "record", 3048 "-R", 3049 "-m", "1024", 3050 "-c", "1", 3051 }; 3052 pid_t pid = getpid(); 3053 char *filter = asprintf__tp_filter_pids(1, &pid); 3054 const char * const sc_args[] = { "-e", }; 3055 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 3056 const char * const majpf_args[] = { "-e", "major-faults" }; 3057 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 3058 const char * const minpf_args[] = { "-e", "minor-faults" }; 3059 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 3060 int err = -1; 3061 3062 /* +3 is for the event string below and the pid filter */ 3063 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3064 majpf_args_nr + minpf_args_nr + argc; 3065 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3066 3067 if (rec_argv == NULL || filter == NULL) 3068 goto out_free; 3069 3070 j = 0; 3071 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3072 rec_argv[j++] = record_args[i]; 3073 3074 if (trace->trace_syscalls) { 3075 for (i = 0; i < sc_args_nr; i++) 3076 rec_argv[j++] = sc_args[i]; 3077 3078 
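/*
 * Roughly, for a workload run with syscall and page fault tracing enabled,
 * the argv assembled here ends up equivalent to something like:
 *
 *   perf record -R -m 1024 -c 1 \
 *        -e raw_syscalls:sys_enter,raw_syscalls:sys_exit \
 *        --filter "common_pid != <perf's own pid>" \
 *        -e major-faults -e minor-faults <workload>
 *
 * with the event string adjusted below for older kernels.
 */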
/* event string may be different for older kernels - e.g., RHEL6 */ 3079 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3080 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3081 else if (is_valid_tracepoint("syscalls:sys_enter")) 3082 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3083 else { 3084 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3085 goto out_free; 3086 } 3087 } 3088 3089 rec_argv[j++] = "--filter"; 3090 rec_argv[j++] = filter; 3091 3092 if (trace->trace_pgfaults & TRACE_PFMAJ) 3093 for (i = 0; i < majpf_args_nr; i++) 3094 rec_argv[j++] = majpf_args[i]; 3095 3096 if (trace->trace_pgfaults & TRACE_PFMIN) 3097 for (i = 0; i < minpf_args_nr; i++) 3098 rec_argv[j++] = minpf_args[i]; 3099 3100 for (i = 0; i < (unsigned int)argc; i++) 3101 rec_argv[j++] = argv[i]; 3102 3103 err = cmd_record(j, rec_argv); 3104 out_free: 3105 free(filter); 3106 free(rec_argv); 3107 return err; 3108 } 3109 3110 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3111 3112 static bool evlist__add_vfs_getname(struct evlist *evlist) 3113 { 3114 bool found = false; 3115 struct evsel *evsel, *tmp; 3116 struct parse_events_error err; 3117 int ret; 3118 3119 parse_events_error__init(&err); 3120 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3121 parse_events_error__exit(&err); 3122 if (ret) 3123 return false; 3124 3125 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3126 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3127 continue; 3128 3129 if (evsel__field(evsel, "pathname")) { 3130 evsel->handler = trace__vfs_getname; 3131 found = true; 3132 continue; 3133 } 3134 3135 list_del_init(&evsel->core.node); 3136 evsel->evlist = NULL; 3137 evsel__delete(evsel); 3138 } 3139 3140 return found; 3141 } 3142 3143 static struct evsel *evsel__new_pgfault(u64 config) 3144 { 3145 struct evsel *evsel; 3146 struct perf_event_attr attr = { 3147 .type = PERF_TYPE_SOFTWARE, 3148 .mmap_data = 1, 3149 }; 3150 3151 attr.config = config; 3152 attr.sample_period = 1; 3153 3154 event_attr_init(&attr); 3155 3156 evsel = evsel__new(&attr); 3157 if (evsel) 3158 evsel->handler = trace__pgfault; 3159 3160 return evsel; 3161 } 3162 3163 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3164 { 3165 struct evsel *evsel; 3166 3167 evlist__for_each_entry(evlist, evsel) { 3168 evsel_trace__delete(evsel->priv); 3169 evsel->priv = NULL; 3170 } 3171 } 3172 3173 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3174 { 3175 const u32 type = event->header.type; 3176 struct evsel *evsel; 3177 3178 if (type != PERF_RECORD_SAMPLE) { 3179 trace__process_event(trace, trace->host, event, sample); 3180 return; 3181 } 3182 3183 evsel = evlist__id2evsel(trace->evlist, sample->id); 3184 if (evsel == NULL) { 3185 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3186 return; 3187 } 3188 3189 if (evswitch__discard(&trace->evswitch, evsel)) 3190 return; 3191 3192 trace__set_base_time(trace, evsel, sample); 3193 3194 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3195 sample->raw_data == NULL) { 3196 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3197 evsel__name(evsel), sample->tid, 3198 sample->cpu, sample->raw_size); 3199 } else { 3200 tracepoint_handler handler = evsel->handler; 3201 handler(trace, evsel, event, sample); 3202 } 3203 3204 if (trace->nr_events_printed >= trace->max_events && trace->max_events != 
ULONG_MAX) 3205 interrupted = true; 3206 } 3207 3208 static int trace__add_syscall_newtp(struct trace *trace) 3209 { 3210 int ret = -1; 3211 struct evlist *evlist = trace->evlist; 3212 struct evsel *sys_enter, *sys_exit; 3213 3214 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3215 if (sys_enter == NULL) 3216 goto out; 3217 3218 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3219 goto out_delete_sys_enter; 3220 3221 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3222 if (sys_exit == NULL) 3223 goto out_delete_sys_enter; 3224 3225 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3226 goto out_delete_sys_exit; 3227 3228 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3229 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3230 3231 evlist__add(evlist, sys_enter); 3232 evlist__add(evlist, sys_exit); 3233 3234 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3235 /* 3236 * We're interested only in the user space callchain 3237 * leading to the syscall, allow overriding that for 3238 * debugging reasons using --kernel_syscall_callchains 3239 */ 3240 sys_exit->core.attr.exclude_callchain_kernel = 1; 3241 } 3242 3243 trace->syscalls.events.sys_enter = sys_enter; 3244 trace->syscalls.events.sys_exit = sys_exit; 3245 3246 ret = 0; 3247 out: 3248 return ret; 3249 3250 out_delete_sys_exit: 3251 evsel__delete_priv(sys_exit); 3252 out_delete_sys_enter: 3253 evsel__delete_priv(sys_enter); 3254 goto out; 3255 } 3256 3257 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3258 { 3259 int err = -1; 3260 struct evsel *sys_exit; 3261 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3262 trace->ev_qualifier_ids.nr, 3263 trace->ev_qualifier_ids.entries); 3264 3265 if (filter == NULL) 3266 goto out_enomem; 3267 3268 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3269 sys_exit = trace->syscalls.events.sys_exit; 3270 err = evsel__append_tp_filter(sys_exit, filter); 3271 } 3272 3273 free(filter); 3274 out: 3275 return err; 3276 out_enomem: 3277 errno = ENOMEM; 3278 goto out; 3279 } 3280 3281 #ifdef HAVE_LIBBPF_SUPPORT 3282 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name) 3283 { 3284 if (trace->bpf_obj == NULL) 3285 return NULL; 3286 3287 return bpf_object__find_map_by_name(trace->bpf_obj, name); 3288 } 3289 3290 static void trace__set_bpf_map_filtered_pids(struct trace *trace) 3291 { 3292 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered"); 3293 } 3294 3295 static void trace__set_bpf_map_syscalls(struct trace *trace) 3296 { 3297 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter"); 3298 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit"); 3299 } 3300 3301 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3302 { 3303 struct bpf_program *pos, *prog = NULL; 3304 const char *sec_name; 3305 3306 if (trace->bpf_obj == NULL) 3307 return NULL; 3308 3309 bpf_object__for_each_program(pos, trace->bpf_obj) { 3310 sec_name = bpf_program__section_name(pos); 3311 if (sec_name && !strcmp(sec_name, name)) { 3312 prog = pos; 3313 break; 3314 } 3315 } 3316 3317 return prog; 3318 } 3319 3320 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3321 const char *prog_name, const char *type) 3322 { 3323 struct bpf_program 
*prog; 3324 3325 if (prog_name == NULL) { 3326 char default_prog_name[256]; 3327 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name); 3328 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3329 if (prog != NULL) 3330 goto out_found; 3331 if (sc->fmt && sc->fmt->alias) { 3332 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias); 3333 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3334 if (prog != NULL) 3335 goto out_found; 3336 } 3337 goto out_unaugmented; 3338 } 3339 3340 prog = trace__find_bpf_program_by_title(trace, prog_name); 3341 3342 if (prog != NULL) { 3343 out_found: 3344 return prog; 3345 } 3346 3347 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3348 prog_name, type, sc->name); 3349 out_unaugmented: 3350 return trace->syscalls.unaugmented_prog; 3351 } 3352 3353 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) 3354 { 3355 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3356 3357 if (sc == NULL) 3358 return; 3359 3360 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3361 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3362 } 3363 3364 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) 3365 { 3366 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3367 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3368 } 3369 3370 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) 3371 { 3372 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3373 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3374 } 3375 3376 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) 3377 { 3378 struct tep_format_field *field, *candidate_field; 3379 int id; 3380 3381 /* 3382 * We're only interested in syscalls that have a pointer: 3383 */ 3384 for (field = sc->args; field; field = field->next) { 3385 if (field->flags & TEP_FIELD_IS_POINTER) 3386 goto try_to_find_pair; 3387 } 3388 3389 return NULL; 3390 3391 try_to_find_pair: 3392 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) { 3393 struct syscall *pair = trace__syscall_info(trace, NULL, id); 3394 struct bpf_program *pair_prog; 3395 bool is_candidate = false; 3396 3397 if (pair == NULL || pair == sc || 3398 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog) 3399 continue; 3400 3401 for (field = sc->args, candidate_field = pair->args; 3402 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3403 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3404 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3405 3406 if (is_pointer) { 3407 if (!candidate_is_pointer) { 3408 // The candidate just doesn't copy our pointer arg; it might copy other pointers we want. 3409 continue; 3410 } 3411 } else { 3412 if (candidate_is_pointer) { 3413 // The candidate might copy a pointer we don't have, skip it.
3414 goto next_candidate; 3415 } 3416 continue; 3417 } 3418 3419 if (strcmp(field->type, candidate_field->type)) 3420 goto next_candidate; 3421 3422 is_candidate = true; 3423 } 3424 3425 if (!is_candidate) 3426 goto next_candidate; 3427 3428 /* 3429 * Check if the tentative pair syscall augmenter has more pointers; if it has, 3430 * it may be collecting those too and we then can't use it, as it would collect 3431 * more than what is common to the two syscalls. 3432 */ 3433 if (candidate_field) { 3434 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3435 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3436 goto next_candidate; 3437 } 3438 3439 pair_prog = pair->bpf_prog.sys_enter; 3440 /* 3441 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3442 * have been searched for, so search it here and if it returns the 3443 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3444 * program for a filtered syscall on a non-filtered one. 3445 * 3446 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3447 * useful for "renameat2". 3448 */ 3449 if (pair_prog == NULL) { 3450 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3451 if (pair_prog == trace->syscalls.unaugmented_prog) 3452 goto next_candidate; 3453 } 3454 3455 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); 3456 return pair_prog; 3457 next_candidate: 3458 continue; 3459 } 3460 3461 return NULL; 3462 } 3463 3464 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) 3465 { 3466 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter), 3467 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit); 3468 int err = 0, key; 3469 3470 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3471 int prog_fd; 3472 3473 if (!trace__syscall_enabled(trace, key)) 3474 continue; 3475 3476 trace__init_syscall_bpf_progs(trace, key); 3477 3478 // It'll get at least the "!raw_syscalls:unaugmented" prog 3479 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); 3480 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3481 if (err) 3482 break; 3483 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); 3484 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 3485 if (err) 3486 break; 3487 } 3488 3489 /* 3490 * Now let's do a second pass looking for enabled syscalls without 3491 * an augmenter that have a signature that is a superset of another 3492 * syscall with an augmenter so that we can auto-reuse it. 3493 * 3494 * I.e. if we have an augmenter for the "open" syscall that has 3495 * this signature: 3496 * 3497 * int open(const char *pathname, int flags, mode_t mode); 3498 * 3499 * i.e. one that will collect just the first string argument, then we 3500 * can reuse it for the 'creat' syscall, that has this signature: 3501 * 3502 * int creat(const char *pathname, mode_t mode); 3503 * 3504 * and for: 3505 * 3506 * int stat(const char *pathname, struct stat *statbuf); 3507 * int lstat(const char *pathname, struct stat *statbuf); 3508 * 3509 * because the 'open' augmenter will collect the first arg as a string, 3510 * and leave alone all the other args, which already helps with 3511 * beautifying 'stat' and 'lstat''s pathname arg.
3512 * 3513 * Then, in time, when 'stat' gets an augmenter that collects both 3514 * first and second arg (this one on the raw_syscalls:sys_exit prog 3515 * array tail call, then that one will be used. 3516 */ 3517 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3518 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3519 struct bpf_program *pair_prog; 3520 int prog_fd; 3521 3522 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3523 continue; 3524 3525 /* 3526 * For now we're just reusing the sys_enter prog, and if it 3527 * already has an augmenter, we don't need to find one. 3528 */ 3529 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog) 3530 continue; 3531 3532 /* 3533 * Look at all the other syscalls for one that has a signature 3534 * that is close enough that we can share: 3535 */ 3536 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3537 if (pair_prog == NULL) 3538 continue; 3539 3540 sc->bpf_prog.sys_enter = pair_prog; 3541 3542 /* 3543 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter 3544 * with the fd for the program we're reusing: 3545 */ 3546 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3547 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3548 if (err) 3549 break; 3550 } 3551 3552 3553 return err; 3554 } 3555 3556 static void trace__delete_augmented_syscalls(struct trace *trace) 3557 { 3558 struct evsel *evsel, *tmp; 3559 3560 evlist__remove(trace->evlist, trace->syscalls.events.augmented); 3561 evsel__delete(trace->syscalls.events.augmented); 3562 trace->syscalls.events.augmented = NULL; 3563 3564 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) { 3565 if (evsel->bpf_obj == trace->bpf_obj) { 3566 evlist__remove(trace->evlist, evsel); 3567 evsel__delete(evsel); 3568 } 3569 3570 } 3571 3572 bpf_object__close(trace->bpf_obj); 3573 trace->bpf_obj = NULL; 3574 } 3575 #else // HAVE_LIBBPF_SUPPORT 3576 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused, 3577 const char *name __maybe_unused) 3578 { 3579 return NULL; 3580 } 3581 3582 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused) 3583 { 3584 } 3585 3586 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused) 3587 { 3588 } 3589 3590 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused, 3591 const char *name __maybe_unused) 3592 { 3593 return NULL; 3594 } 3595 3596 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused) 3597 { 3598 return 0; 3599 } 3600 3601 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused) 3602 { 3603 } 3604 #endif // HAVE_LIBBPF_SUPPORT 3605 3606 static bool trace__only_augmented_syscalls_evsels(struct trace *trace) 3607 { 3608 struct evsel *evsel; 3609 3610 evlist__for_each_entry(trace->evlist, evsel) { 3611 if (evsel == trace->syscalls.events.augmented || 3612 evsel->bpf_obj == trace->bpf_obj) 3613 continue; 3614 3615 return false; 3616 } 3617 3618 return true; 3619 } 3620 3621 static int trace__set_ev_qualifier_filter(struct trace *trace) 3622 { 3623 if (trace->syscalls.events.sys_enter) 3624 return trace__set_ev_qualifier_tp_filter(trace); 3625 return 0; 3626 } 3627 3628 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3629 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3630 { 3631 int err = 0; 3632 #ifdef HAVE_LIBBPF_SUPPORT 3633 bool value = true; 3634 int map_fd = bpf_map__fd(map); 3635 size_t i; 
3636 3637 for (i = 0; i < npids; ++i) { 3638 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 3639 if (err) 3640 break; 3641 } 3642 #endif 3643 return err; 3644 } 3645 3646 static int trace__set_filter_loop_pids(struct trace *trace) 3647 { 3648 unsigned int nr = 1; int err; 3649 pid_t pids[32] = { 3650 getpid(), 3651 }; 3652 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 3653 3654 while (thread && nr < ARRAY_SIZE(pids)) { 3655 struct thread *parent = machine__find_thread(trace->host, 3656 thread__ppid(thread), 3657 thread__ppid(thread)); 3658 3659 if (parent == NULL) 3660 break; 3661 3662 if (!strcmp(thread__comm_str(parent), "sshd") || 3663 strstarts(thread__comm_str(parent), "gnome-terminal")) { 3664 pids[nr++] = thread__tid(parent); 3665 break; 3666 } 3667 thread = parent; 3668 } 3669 3670 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 3671 if (!err && trace->filter_pids.map) 3672 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 3673 3674 return err; 3675 } 3676 3677 static int trace__set_filter_pids(struct trace *trace) 3678 { 3679 int err = 0; 3680 /* 3681 * Better not use !target__has_task() here because we need to cover the 3682 * case where no threads were specified in the command line, but a 3683 * workload was, and in that case we will fill in the thread_map when 3684 * we fork the workload in evlist__prepare_workload. 3685 */ 3686 if (trace->filter_pids.nr > 0) { 3687 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 3688 trace->filter_pids.entries); 3689 if (!err && trace->filter_pids.map) { 3690 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 3691 trace->filter_pids.entries); 3692 } 3693 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 3694 err = trace__set_filter_loop_pids(trace); 3695 } 3696 3697 return err; 3698 } 3699 3700 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 3701 { 3702 struct evlist *evlist = trace->evlist; 3703 struct perf_sample sample; 3704 int err = evlist__parse_sample(evlist, event, &sample); 3705 3706 if (err) 3707 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 3708 else 3709 trace__handle_event(trace, event, &sample); 3710 3711 return 0; 3712 } 3713 3714 static int __trace__flush_events(struct trace *trace) 3715 { 3716 u64 first = ordered_events__first_time(&trace->oe.data); 3717 u64 flush = trace->oe.last - NSEC_PER_SEC; 3718 3719 /* Is there something to flush? */ 3720 if (first && first < flush) 3721 return ordered_events__flush_time(&trace->oe.data, flush); 3722 3723 return 0; 3724 } 3725 3726 static int trace__flush_events(struct trace *trace) 3727 { 3728 return !trace->sort_events ?
static int trace__deliver_event(struct trace *trace, union perf_event *event)
{
	int err;

	if (!trace->sort_events)
		return __trace__deliver_event(trace, event);

	err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
	if (err && err != -1)
		return err;

	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
	if (err)
		return err;

	return trace__flush_events(trace);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct trace *trace = container_of(oe, struct trace, oe.data);

	return __trace__deliver_event(trace, event->event);
}

static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
{
	struct tep_format_field *field;
	struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);

	if (evsel->tp_format == NULL || fmt == NULL)
		return NULL;

	for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt)
		if (strcmp(field->name, arg) == 0)
			return fmt;

	return NULL;
}
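
/*
 * Expand symbolic values on the right hand side of filter expressions using
 * the per-arg strtoul resolvers, so that the kernel only ever sees numbers.
 * A hypothetical example, assuming the 'cmd' arg of an ioctl-like event has
 * a strtoul resolver that knows TCGETS is 0x5401:
 *
 *	--filter "cmd==TCGETS"   gets rewritten to   "cmd==0x5401"
 */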
static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel)
{
	char *tok, *left = evsel->filter, *new_filter = evsel->filter;

	while ((tok = strpbrk(left, "=<>!")) != NULL) {
		char *right = tok + 1, *right_end;

		if (*right == '=')
			++right;

		while (isspace(*right))
			++right;

		if (*right == '\0')
			break;

		while (!isalpha(*left))
			if (++left == tok) {
				/*
				 * Bail out: we can't find the name of the
				 * argument used in the filter, so let the
				 * filter be set as-is, it will fail later.
				 */
				return 0;
			}

		right_end = right + 1;
		while (isalnum(*right_end) || *right_end == '_' || *right_end == '|')
			++right_end;

		if (isalpha(*right)) {
			struct syscall_arg_fmt *fmt;
			int left_size = tok - left,
			    right_size = right_end - right;
			char arg[128];

			while (isspace(left[left_size - 1]))
				--left_size;

			scnprintf(arg, sizeof(arg), "%.*s", left_size, left);

			fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
			if (fmt == NULL) {
				pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ",
				  arg, (int)(right - tok), tok, right_size, right);

			if (fmt->strtoul) {
				u64 val;
				struct syscall_arg syscall_arg = {
					.parm = fmt->parm,
				};

				if (fmt->strtoul(right, right_size, &syscall_arg, &val)) {
					char *n, expansion[19];
					int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val);
					int expansion_offset = right - new_filter;

					pr_debug("%s", expansion);

					if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) {
						pr_debug(" out of memory!\n");
						if (new_filter != evsel->filter)
							free(new_filter);
						return -1;
					}
					if (new_filter != evsel->filter)
						free(new_filter);
					left = n + expansion_offset + expansion_length;
					new_filter = n;
				} else {
					pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n",
					       right_size, right, arg, evsel->name, evsel->filter);
					return -1;
				}
			} else {
				pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n",
				       arg, evsel->name, evsel->filter);
				return -1;
			}

			pr_debug("\n");
		} else {
			left = right_end;
		}
	}

	if (new_filter != evsel->filter) {
		pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
		evsel__set_filter(evsel, new_filter);
		free(new_filter);
	}

	return 0;
}

static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		if (trace__expand_filter(trace, evsel)) {
			*err_evsel = evsel;
			return -1;
		}
	}

	return 0;
}
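
/*
 * Live mode: add the requested events, create the ring buffers, optionally
 * fork the workload, then consume and print events until interrupted or the
 * workload finishes.
 */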
static int trace__run(struct trace *trace, int argc, const char **argv)
{
	struct evlist *evlist = trace->evlist;
	struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
	int err = -1, i;
	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	trace->live = true;

	if (!trace->raw_augmented_syscalls) {
		if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
			goto out_error_raw_syscalls;

		if (trace->trace_syscalls)
			trace->vfs_getname = evlist__add_vfs_getname(evlist);
	}

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_maj);
	}

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
			goto out_error_mem;
		evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
		evlist__add(evlist, pgfault_min);
	}

	/* Enable ignoring missing threads when -u/-p option is defined. */
	trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;

	if (trace->sched &&
	    evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;
	/*
	 * If a global cgroup was set, apply it to all the events without an
	 * explicit cgroup. I.e.:
	 *
	 *	trace -G A -e sched:*switch
	 *
	 * will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc.
	 * _and_ sched:sched_switch to the 'A' cgroup, while:
	 *
	 *	trace -e sched:*switch -G A
	 *
	 * will only set the sched:sched_switch event to the 'A' cgroup, all the
	 * other events (raw_syscalls:sys_{enter,exit}, etc.) are left "without"
	 * a cgroup (on the root cgroup, sys wide, etc).
	 *
	 * Multiple cgroups:
	 *
	 *	trace -G A -e sched:*switch -G B
	 *
	 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes
	 * to the 'B' cgroup.
	 *
	 * evlist__set_default_cgroup() grabs a reference of the passed cgroup
	 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL.
	 */
	if (trace->cgroup)
		evlist__set_default_cgroup(trace->evlist, trace->cgroup);

	err = evlist__create_maps(evlist, &trace->opts.target);
	if (err < 0) {
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;
	}

	err = trace__symbols_init(trace, evlist);
	if (err < 0) {
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;
	}

	evlist__config(evlist, &trace->opts, &callchain_param);

	if (forks) {
		err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
		if (err < 0) {
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;
		}
		workload_pid = evlist->workload.pid;
	}

	err = evlist__open(evlist);
	if (err < 0)
		goto out_error_open;

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		       errbuf);
		goto out_error_open;
	}

	err = trace__set_filter_pids(trace);
	if (err < 0)
		goto out_error_mem;

	if (trace->syscalls.prog_array.sys_enter)
		trace__init_syscalls_bpf_prog_array_maps(trace);

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);
		if (err < 0)
			goto out_errno;

		if (trace->syscalls.events.sys_exit) {
			pr_debug("event qualifier tracepoint filter: %s\n",
				 trace->syscalls.events.sys_exit->filter);
		}
	}

	/*
	 * If the "close" syscall is not traced, then we will not have the
	 * opportunity to, in syscall_arg__scnprintf_close_fd(), invalidate the
	 * fd->pathname table, and we would end up showing the last value set
	 * by syscalls opening a pathname and associating it with a descriptor
	 * or reading it from /proc/pid/fd/ in cases where that doesn't make
	 * sense.
	 *
	 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is
	 * not in use.
	 */
	trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
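
	/*
	 * Filters may still contain symbolic names at this point; expand them
	 * to numbers first, then evlist__apply_filters() hands the resulting
	 * filter strings to the kernel.
	 */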
	err = trace__expand_filters(trace, &evsel);
	if (err)
		goto out_delete_evlist;
	err = evlist__apply_filters(evlist, &evsel);
	if (err < 0)
		goto out_error_apply_filters;

	if (trace->dump.map)
		bpf_map__fprintf(trace->dump.map, trace->output);

	err = evlist__mmap(evlist, trace->opts.mmap_pages);
	if (err < 0)
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
		evlist__enable(evlist);

	if (forks)
		evlist__start_workload(evlist);

	if (trace->opts.target.initial_delay) {
		usleep(trace->opts.target.initial_delay * 1000);
		evlist__enable(evlist);
	}

	trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
		perf_thread_map__nr(evlist->core.threads) > 1 ||
		evlist__first(evlist)->core.attr.inherit;

	/*
	 * Now that we already used evsel->core.attr to ask the kernel to set up
	 * the events, let's reuse evsel->core.attr.sample_max_stack as the limit
	 * in trace__resolve_callchain(), allowing per-event max-stack settings
	 * to override an explicitly set --max-stack global setting.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__has_callchain(evsel) &&
		    evsel->core.attr.sample_max_stack == 0)
			evsel->core.attr.sample_max_stack = trace->max_stack;
	}
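
	/*
	 * Main event loop: drain each per-CPU ring buffer, deliver the events
	 * read from it, then poll for more. When 'done' is set (the workload
	 * exited or we got a SIGINT), disable the events and keep draining
	 * until the buffers are empty.
	 */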
again:
	before = trace->nr_events;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		union perf_event *event;
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			++trace->nr_events;

			err = trace__deliver_event(trace, event);
			if (err)
				goto out_disable;

			perf_mmap__consume(&md->core);

			if (interrupted)
				goto out_disable;

			if (done && !draining) {
				evlist__disable(evlist);
				draining = true;
			}
		}
		perf_mmap__read_done(&md->core);
	}

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && evlist__poll(evlist, timeout) > 0) {
			if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
				draining = true;

			goto again;
		} else {
			if (trace__flush_events(trace))
				goto out_disable;
		}
	} else {
		goto again;
	}

out_disable:
	thread__zput(trace->current);

	evlist__disable(evlist);

	if (trace->sort_events)
		ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);

	if (!err) {
		if (trace->summary)
			trace__fprintf_thread_summary(trace, trace->output);

		if (trace->show_tool_stats) {
			fprintf(trace->output, "Stats:\n "
					       " vfs_getname : %" PRIu64 "\n"
					       " proc_getname: %" PRIu64 "\n",
				trace->stats.vfs_getname,
				trace->stats.proc_getname);
		}
	}

out_delete_evlist:
	trace__symbols__exit(trace);
	evlist__free_syscall_tp_fields(evlist);
	evlist__delete(evlist);
	cgroup__put(trace->cgroup);
	trace->evlist = NULL;
	trace->live = false;
	return err;
{
	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
	goto out_error;

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
	goto out_error;

out_error_mmap:
	evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
	goto out_error;

out_error_open:
	evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

out_error:
	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;
}
out_error_mem:
	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

out_errno:
	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
}
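
/*
 * Replay mode ('perf trace -i perf.data'): instead of tracing live, process
 * the events in a previously recorded session, reusing the same syscall
 * enter/exit handlers via the perf_session tool callbacks.
 */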
static int trace__replay(struct trace *trace)
{
	const struct evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	};
	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	};
	struct perf_session *session;
	struct evsel *evsel;
	int err = -1;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data  = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&data, &trace->tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)
		goto out;

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);
	if (err)
		goto out;

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
	trace->syscalls.events.sys_enter = evsel;
	/* older kernels have syscalls tp versus raw_syscalls */
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");

	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
	    perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error initializing raw_syscalls:sys_enter event\n");
		goto out;
	}

	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
	trace->syscalls.events.sys_exit = evsel;
	if (evsel == NULL)
		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
	if (evsel &&
	    (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
	    perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error initializing raw_syscalls:sys_exit event\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;
	}

	setup_pager();

	err = perf_session__process_events(session);
	if (err)
		pr_err("Failed to process events, error %d\n", err);
	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

out:
	perf_session__delete(session);

	return err;
}

static size_t trace__fprintf_threads_header(FILE *fp)
{
	size_t printed;

	printed = fprintf(fp, "\n Summary of events:\n\n");

	return printed;
}

DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
	struct syscall_stats *stats;
	double msecs;
	int syscall;
)
{
	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
	struct syscall_stats *stats = source->priv;

	entry->syscall = source->i;
	entry->stats   = stats;
	entry->msecs   = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0;
}
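
/*
 * Print the per-thread syscall summary table, one line per syscall, resorted
 * by total time spent (msecs, see the rb resort comparison above).
 */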
static size_t thread__dump_stats(struct thread_trace *ttrace,
				 struct trace *trace, FILE *fp)
{
	size_t printed = 0;
	struct syscall *sc;
	struct rb_node *nd;
	DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);

	if (syscall_stats == NULL)
		return 0;

	printed += fprintf(fp, "\n");

	printed += fprintf(fp, "   syscall            calls  errors    total       min       avg       max stddev\n");
	printed += fprintf(fp, "                                      (msec)    (msec)    (msec)    (msec)    (%%)\n");
	printed += fprintf(fp, "   --------------- -------- ------ -------- --------- --------- --------- ------\n");

	resort_rb__for_each_entry(nd, syscall_stats) {
		struct syscall_stats *stats = syscall_stats_entry->stats;
		if (stats) {
			double min = (double)(stats->stats.min) / NSEC_PER_MSEC;
			double max = (double)(stats->stats.max) / NSEC_PER_MSEC;
			double avg = avg_stats(&stats->stats);
			double pct;
			u64 n = (u64)stats->stats.n;

			pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0;
			avg /= NSEC_PER_MSEC;

			sc = &trace->syscalls.table[syscall_stats_entry->syscall];
			printed += fprintf(fp, "   %-15s", sc->name);
			printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f",
					   n, stats->nr_failures, syscall_stats_entry->msecs, min, avg);
			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);

			if (trace->errno_summary && stats->nr_failures) {
				const char *arch_name = perf_env__arch(trace->host->env);
				int e;

				for (e = 0; e < stats->max_errno; ++e) {
					if (stats->errnos[e] != 0)
						fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]);
				}
			}
		}
	}

	resort_rb__delete(syscall_stats);
	printed += fprintf(fp, "\n\n");

	return printed;
}

static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
{
	size_t printed = 0;
	struct thread_trace *ttrace = thread__priv(thread);
	double ratio;

	if (ttrace == NULL)
		return 0;

	ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

	printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread__tid(thread));
	printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
	printed += fprintf(fp, "%.1f%%", ratio);
	if (ttrace->pfmaj)
		printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
	if (ttrace->pfmin)
		printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
	if (trace->sched)
		printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
	else if (fputc('\n', fp) != EOF)
		++printed;

	printed += thread__dump_stats(ttrace, trace, fp);

	return printed;
}

static unsigned long thread__nr_events(struct thread_trace *ttrace)
{
	return ttrace ? ttrace->nr_events : 0;
}
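
/*
 * Resort the threads in each machine hash table bucket by the number of
 * events each one generated, then print one summary block per thread.
 */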
DEFINE_RESORT_RB(threads,
		(thread__nr_events(thread__priv(a->thread)) <
		 thread__nr_events(thread__priv(b->thread))),
	struct thread *thread;
)
{
	entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
}

static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
{
	size_t printed = trace__fprintf_threads_header(fp);
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);

		if (threads == NULL) {
			fprintf(fp, "%s", "Error sorting output by nr_events!\n");
			return 0;
		}

		resort_rb__for_each_entry(nd, threads)
			printed += trace__fprintf_thread(fp, threads_entry->thread, trace);

		resort_rb__delete(threads);
	}
	return printed;
}

static int trace__set_duration(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct trace *trace = opt->value;

	trace->duration_filter = atof(str);
	return 0;
}

static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
					      int unset __maybe_unused)
{
	int ret = -1;
	size_t i;
	struct trace *trace = opt->value;
	/*
	 * FIXME: introduce an intarray class, plainly parse the csv and create a
	 * { int nr, int entries[] } struct...
	 */
	struct intlist *list = intlist__new(str);

	if (list == NULL)
		return -1;

	i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
	trace->filter_pids.entries = calloc(i, sizeof(pid_t));

	if (trace->filter_pids.entries == NULL)
		goto out;

	trace->filter_pids.entries[0] = getpid();

	for (i = 1; i < trace->filter_pids.nr; ++i)
		trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;

	intlist__delete(list);
	ret = 0;
out:
	return ret;
}

static int trace__open_output(struct trace *trace, const char *filename)
{
	struct stat st;

	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		unlink(oldname);
		rename(filename, oldname);
	}

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
}
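
/*
 * Parse the -F/--pf option argument: "all" traces both major and minor page
 * faults, while "maj" and "min" select just one kind, e.g.:
 *
 *	perf trace -F all sleep 1
 */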
static int parse_pagefaults(const struct option *opt, const char *str,
			    int unset __maybe_unused)
{
	int *trace_pgfaults = opt->value;

	if (strcmp(str, "all") == 0)
		*trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
	else if (strcmp(str, "maj") == 0)
		*trace_pgfaults |= TRACE_PFMAJ;
	else if (strcmp(str, "min") == 0)
		*trace_pgfaults |= TRACE_PFMIN;
	else
		return -1;

	return 0;
}

static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->handler == NULL)
			evsel->handler = handler;
	}
}

static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name)
{
	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);

	if (fmt) {
		const struct syscall_fmt *scfmt = syscall_fmt__find(name);

		if (scfmt) {
			int skip = 0;

			if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 ||
			    strcmp(evsel->tp_format->format.fields->name, "nr") == 0)
				++skip;

			memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt));
		}
	}
}

static int evlist__set_syscall_tp_fields(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->priv || !evsel->tp_format)
			continue;

		if (strcmp(evsel->tp_format->system, "syscalls")) {
			evsel__init_tp_arg_scnprintf(evsel);
			continue;
		}

		if (evsel__init_syscall_tp(evsel))
			return -1;

		if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1);
		} else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
			struct syscall_tp *sc = __evsel__syscall_tp(evsel);

			if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
				return -1;

			evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1);
		}
	}

	return 0;
}

/*
 * XXX: Hackish, just splitting the combined -e+--event (syscalls
 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc.)) to
 * use existing facilities unchanged (trace->ev_qualifier + parse_options()).
 *
 * It'd be better to introduce a parse_options() variant that would return a
 * list with the terms it didn't match to an event...
 */
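/*
 * For instance (a hypothetical command line):
 *
 *	perf trace -e open*,close,sched:sched_switch
 *
 * ends up with 'open*,close' in the strace-like syscall qualifier list and
 * 'sched:sched_switch' handed over to parse_events_option().
 */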
static int trace__parse_events_option(const struct option *opt, const char *str,
				      int unset __maybe_unused)
{
	struct trace *trace = (struct trace *)opt->value;
	const char *s = str;
	char *sep = NULL, *lists[2] = { NULL, NULL, };
	int len = strlen(str) + 1, err = -1, list, idx;
	char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
	char group_name[PATH_MAX];
	const struct syscall_fmt *fmt;

	if (strace_groups_dir == NULL)
		return -1;

	if (*s == '!') {
		++s;
		trace->not_ev_qualifier = true;
	}

	while (1) {
		if ((sep = strchr(s, ',')) != NULL)
			*sep = '\0';

		list = 0;
		if (syscalltbl__id(trace->sctbl, s) >= 0 ||
		    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
			list = 1;
			goto do_concat;
		}

		fmt = syscall_fmt__find_by_alias(s);
		if (fmt != NULL) {
			list = 1;
			s = fmt->name;
		} else {
			path__join(group_name, sizeof(group_name), strace_groups_dir, s);
			if (access(group_name, R_OK) == 0)
				list = 1;
		}
do_concat:
		if (lists[list]) {
			sprintf(lists[list] + strlen(lists[list]), ",%s", s);
		} else {
			lists[list] = malloc(len);
			if (lists[list] == NULL)
				goto out;
			strcpy(lists[list], s);
		}

		if (!sep)
			break;

		*sep = ',';
		s = sep + 1;
	}

	if (lists[1] != NULL) {
		struct strlist_config slist_config = {
			.dirname = strace_groups_dir,
		};

		trace->ev_qualifier = strlist__new(lists[1], &slist_config);
		if (trace->ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier\n", trace->output);
			goto out;
		}

		if (trace__validate_ev_qualifier(trace))
			goto out;
		trace->trace_syscalls = true;
	}

	err = 0;

	if (lists[0]) {
		struct parse_events_option_args parse_events_option_args = {
			.evlistp = &trace->evlist,
		};
		struct option o = {
			.value = &parse_events_option_args,
		};
		err = parse_events_option(&o, lists[0], 0);
	}
out:
	free(strace_groups_dir);
	free(lists[0]);
	free(lists[1]);
	if (sep)
		*sep = ',';

	return err;
}

static int trace__parse_cgroups(const struct option *opt, const char *str, int unset)
{
	struct trace *trace = opt->value;

	if (!list_empty(&trace->evlist->core.entries)) {
		struct option o = {
			.value = &trace->evlist,
		};
		return parse_cgroups(&o, str, unset);
	}
	trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);

	return 0;
}
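
/*
 * Handle the 'trace' section of perfconfig files. A minimal sketch of what
 * the keys handled below look like in, say, ~/.perfconfig (the object path
 * is just a hypothetical example):
 *
 *	[trace]
 *		add_events = /path/to/augmented_raw_syscalls.o
 *		show_timestamp = no
 *		args_alignment = 40
 *		tracepoint_beautifiers = libbeauty
 */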
static int trace__config(const char *var, const char *value, void *arg)
{
	struct trace *trace = arg;
	int err = 0;

	if (!strcmp(var, "trace.add_events")) {
		trace->perfconfig_events = strdup(value);
		if (trace->perfconfig_events == NULL) {
			pr_err("Not enough memory for %s\n", "trace.add_events");
			return -1;
		}
	} else if (!strcmp(var, "trace.show_timestamp")) {
		trace->show_tstamp = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_duration")) {
		trace->show_duration = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.show_arg_names")) {
		trace->show_arg_names = perf_config_bool(var, value);
		if (!trace->show_arg_names)
			trace->show_zeros = true;
	} else if (!strcmp(var, "trace.show_zeros")) {
		bool new_show_zeros = perf_config_bool(var, value);

		if (!trace->show_arg_names && !new_show_zeros) {
			pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
			goto out;
		}
		trace->show_zeros = new_show_zeros;
	} else if (!strcmp(var, "trace.show_prefix")) {
		trace->show_string_prefix = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.no_inherit")) {
		trace->opts.no_inherit = perf_config_bool(var, value);
	} else if (!strcmp(var, "trace.args_alignment")) {
		int args_alignment = 0;

		if (perf_config_int(&args_alignment, var, value) == 0)
			trace->args_alignment = args_alignment;
	} else if (!strcmp(var, "trace.tracepoint_beautifiers")) {
		if (strcasecmp(value, "libtraceevent") == 0)
			trace->libtraceevent_print = true;
		else if (strcasecmp(value, "libbeauty") == 0)
			trace->libtraceevent_print = false;
	}
out:
	return err;
}

static void trace__exit(struct trace *trace)
{
	int i;

	strlist__delete(trace->ev_qualifier);
	zfree(&trace->ev_qualifier_ids.entries);
	if (trace->syscalls.table) {
		for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
			syscall__exit(&trace->syscalls.table[i]);
		zfree(&trace->syscalls.table);
	}
	syscalltbl__delete(trace->sctbl);
	zfree(&trace->perfconfig_events);
}
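
/*
 * Entry point for 'perf trace': parse the options, wire up the evlist and
 * the BPF based syscall augmentation, then either delegate to 'perf trace
 * record', replay a perf.data file or run the live trace loop.
 */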
int cmd_trace(int argc, const char **argv)
{
	const char *trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",
		"perf trace record [<options>] [<command>]",
		"perf trace record [<options>] -- <command> [<options>]",
		NULL
	};
	struct trace trace = {
		.opts = {
			.target = {
				.uid	   = UINT_MAX,
				.uses_mmap = true,
			},
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.no_buffering  = true,
			.mmap_pages    = UINT_MAX,
		},
		.output = stderr,
		.show_comm = true,
		.show_tstamp = true,
		.show_duration = true,
		.show_arg_names = true,
		.args_alignment = 70,
		.trace_syscalls = false,
		.kernel_syscallchains = false,
		.max_stack = UINT_MAX,
		.max_events = ULONG_MAX,
	};
	const char *map_dump_str = NULL;
	const char *output_name = NULL;
	const struct option trace_options[] = {
	OPT_CALLBACK('e', "event", &trace, "event",
		     "event/syscall selector. use 'perf list' to list available events",
		     trace__parse_events_option),
	OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN(0, "comm", &trace.show_comm,
		    "show the thread COMM next to its id"),
	OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
	OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
		     trace__parse_events_option),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		   "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		   "trace events on existing thread id"),
	OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
		     "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
		     "number of mmap data pages", evlist__parse_mmap_pages),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
		   "user to profile"),
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
#ifdef HAVE_LIBBPF_SUPPORT
	OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"),
#endif
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),
	OPT_BOOLEAN('T', "time", &trace.full_time,
		    "Show full timestamp, not time relative to first start"),
	OPT_BOOLEAN(0, "failure", &trace.failure_only,
		    "Show only syscalls that failed"),
	OPT_BOOLEAN('s', "summary", &trace.summary_only,
		    "Show only syscall summary with statistics"),
	OPT_BOOLEAN('S', "with-summary", &trace.summary,
		    "Show all syscalls and summary with statistics"),
	OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
		    "Show errno stats per syscall, use with -s or -S"),
	OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
			     "Trace pagefaults", parse_pagefaults, "maj"),
	OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
	OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
	OPT_CALLBACK(0, "call-graph", &trace.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
		    "Use libtraceevent to print the tracepoint arguments."),
	OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
		    "Show the kernel callchains on the syscall exit path"),
	OPT_ULONG(0, "max-events", &trace.max_events,
		  "Set the maximum number of events to print, exit after that is reached."),
	OPT_UINTEGER(0, "min-stack", &trace.min_stack,
		     "Set the minimum stack depth when parsing the callchain, "
		     "anything below the specified depth will be ignored."),
	OPT_UINTEGER(0, "max-stack", &trace.max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
		    "Sort batch of events before processing, use if getting out of order events"),
	OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
		    "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
		     trace__parse_cgroups),
	OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
		    "ms to wait before starting measurement after program "
		    "start"),
	OPTS_EVSWITCH(&trace.evswitch),
	OPT_END()
	};
"), 4824 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 4825 "Set the minimum stack depth when parsing the callchain, " 4826 "anything below the specified depth will be ignored."), 4827 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 4828 "Set the maximum stack depth when parsing the callchain, " 4829 "anything beyond the specified depth will be ignored. " 4830 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 4831 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 4832 "Sort batch of events before processing, use if getting out of order events"), 4833 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 4834 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 4835 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 4836 "per thread proc mmap processing timeout in ms"), 4837 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 4838 trace__parse_cgroups), 4839 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay, 4840 "ms to wait before starting measurement after program " 4841 "start"), 4842 OPTS_EVSWITCH(&trace.evswitch), 4843 OPT_END() 4844 }; 4845 bool __maybe_unused max_stack_user_set = true; 4846 bool mmap_pages_user_set = true; 4847 struct evsel *evsel; 4848 const char * const trace_subcommands[] = { "record", NULL }; 4849 int err = -1; 4850 char bf[BUFSIZ]; 4851 struct sigaction sigchld_act; 4852 4853 signal(SIGSEGV, sighandler_dump_stack); 4854 signal(SIGFPE, sighandler_dump_stack); 4855 signal(SIGINT, sighandler_interrupt); 4856 4857 memset(&sigchld_act, 0, sizeof(sigchld_act)); 4858 sigchld_act.sa_flags = SA_SIGINFO; 4859 sigchld_act.sa_sigaction = sighandler_chld; 4860 sigaction(SIGCHLD, &sigchld_act, NULL); 4861 4862 trace.evlist = evlist__new(); 4863 trace.sctbl = syscalltbl__new(); 4864 4865 if (trace.evlist == NULL || trace.sctbl == NULL) { 4866 pr_err("Not enough memory to run!\n"); 4867 err = -ENOMEM; 4868 goto out; 4869 } 4870 4871 /* 4872 * Parsing .perfconfig may entail creating a BPF event, that may need 4873 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 4874 * is too small. This affects just this process, not touching the 4875 * global setting. If it fails we'll get something in 'perf trace -v' 4876 * to help diagnose the problem. 4877 */ 4878 rlimit__bump_memlock(); 4879 4880 err = perf_config(trace__config, &trace); 4881 if (err) 4882 goto out; 4883 4884 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 4885 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 4886 4887 /* 4888 * Here we already passed thru trace__parse_events_option() and it has 4889 * already figured out if -e syscall_name, if not but if --event 4890 * foo:bar was used, the user is interested _just_ in those, say, 4891 * tracepoint events, not in the strace-like syscall-name-based mode. 4892 * 4893 * This is important because we need to check if strace-like mode is 4894 * needed to decided if we should filter out the eBPF 4895 * __augmented_syscalls__ code, if it is in the mix, say, via 4896 * .perfconfig trace.add_events, and filter those out. 4897 */ 4898 if (!trace.trace_syscalls && !trace.trace_pgfaults && 4899 trace.evlist->core.nr_entries == 0 /* Was --events used? 
	/*
	 * Now that we have --verbose figured out, let's see if we need to parse
	 * events from .perfconfig, so that if those events fail parsing, say some
	 * BPF program fails, then we'll be able to use --verbose to see what went
	 * wrong in more detail.
	 */
	if (trace.perfconfig_events != NULL) {
		struct parse_events_error parse_err;

		parse_events_error__init(&parse_err);
		err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
		if (err)
			parse_events_error__print(&parse_err, trace.perfconfig_events);
		parse_events_error__exit(&parse_err);
		if (err)
			goto out;
	}

	if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
		usage_with_options_msg(trace_usage, trace_options,
				       "cgroup monitoring only available in system-wide mode");
	}

	evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
	if (IS_ERR(evsel)) {
		bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
		pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
		goto out;
	}

	if (evsel) {
		trace.syscalls.events.augmented = evsel;

		evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
		if (evsel == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
			goto out;
		}

		if (evsel->bpf_obj == NULL) {
			pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n");
			goto out;
		}

		trace.bpf_obj = evsel->bpf_obj;

		/*
		 * If we have _just_ the augmenter event but don't have an
		 * explicit --syscalls, then assume we want all strace-like
		 * syscalls:
		 */
		if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace))
			trace.trace_syscalls = true;
		/*
		 * So, if we have a syscall augmenter, but trace_syscalls, aka
		 * strace-like syscall tracing, is not set, then we need to throw
		 * away the augmenter, i.e. all the events that were created
		 * from that BPF object file.
		 *
		 * This is more to fix the current .perfconfig trace.add_events
		 * style of setting up the strace-like eBPF based syscall point
		 * payload augmenter.
		 *
		 * All this complexity will be avoided by adding an alternative
		 * to trace.add_events in the form of
		 * trace.bpf_augmented_syscalls, that will be only parsed if we
		 * need it.
		 *
		 * .perfconfig trace.add_events is still useful if we want, for
		 * instance, to have msr_write.msr in some .perfconfig profile
		 * based 'perf trace --config determinism.profile' mode, where
		 * for some particular goal/workload type we want a set of
		 * events and output mode (with timings, etc) instead of having
		 * to add all via the command line.
		 *
		 * Also --config to specify an alternate .perfconfig file needs
		 * to be implemented.
		 */
		if (!trace.trace_syscalls) {
			trace__delete_augmented_syscalls(&trace);
		} else {
			trace__set_bpf_map_filtered_pids(&trace);
			trace__set_bpf_map_syscalls(&trace);
			trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented");
		}
	}

	err = bpf__setup_stdout(trace.evlist);
	if (err) {
		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
		goto out;
	}

	err = -1;

	if (map_dump_str) {
		trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str);
		if (trace.dump.map == NULL) {
			pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str);
			goto out;
		}
	}

	if (trace.trace_pgfaults) {
		trace.opts.sample_address = true;
		trace.opts.sample_time = true;
	}

	if (trace.opts.mmap_pages == UINT_MAX)
		mmap_pages_user_set = false;

	if (trace.max_stack == UINT_MAX) {
		trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
		max_stack_user_set = false;
	}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
	if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
		record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
	}
#endif

	if (callchain_param.enabled) {
		if (!mmap_pages_user_set && geteuid() == 0)
			trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

		symbol_conf.use_callchain = true;
	}

	if (trace.evlist->core.nr_entries > 0) {
		evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
		if (evlist__set_syscall_tp_fields(trace.evlist)) {
			perror("failed to set syscalls:* tracepoint fields");
			goto out;
		}
	}

	if (trace.sort_events) {
		ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
		ordered_events__set_copy_on_queue(&trace.oe.data, true);
	}

	/*
	 * If we are augmenting syscalls, then combine what we put in the
	 * __augmented_syscalls__ BPF map with what is in the
	 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF,
	 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit.
	 *
	 * We'll switch to look at two BPF maps, one for sys_enter and the
	 * other for sys_exit when we start augmenting the sys_exit paths with
	 * buffers that are being copied from kernel to userspace, think 'read'
	 * syscall.
	 */
	if (trace.syscalls.events.augmented) {
		evlist__for_each_entry(trace.evlist, evsel) {
			bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;

			if (raw_syscalls_sys_exit) {
				trace.raw_augmented_syscalls = true;
				goto init_augmented_syscall_tp;
			}

			if (trace.syscalls.events.augmented->priv == NULL &&
			    strstr(evsel__name(evsel), "syscalls:sys_enter")) {
				struct evsel *augmented = trace.syscalls.events.augmented;

				if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
				    evsel__init_augmented_syscall_tp_args(augmented))
					goto out;
				/*
				 * Augmented is the __augmented_syscalls__ BPF_OUTPUT event.
				 * Above we made sure we can get from the payload the tp
				 * fields that we get from the syscalls:sys_enter tracefs
				 * format file.
				 */
				augmented->handler = trace__sys_enter;
				/*
				 * Now we do the same for the *syscalls:sys_enter event so that
				 * if we handle it directly, i.e. if the BPF prog returns 0 so
				 * as not to filter it, then we'll handle it just like we would
				 * for the BPF_OUTPUT one:
				 */
				if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
				    evsel__init_augmented_syscall_tp_args(evsel))
					goto out;
				evsel->handler = trace__sys_enter;
			}

			if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
				struct syscall_tp *sc;
init_augmented_syscall_tp:
				if (evsel__init_augmented_syscall_tp(evsel, evsel))
					goto out;
				sc = __evsel__syscall_tp(evsel);
				/*
				 * For now with BPF raw_augmented we hook into
				 * raw_syscalls:sys_enter and there we get all
				 * 6 syscall args plus the tracepoint common
				 * fields and the syscall_nr (another long).
				 * So we check if that is the case and if so
				 * don't look at sc->args_size, but always at
				 * the full raw_syscalls:sys_enter payload,
				 * which is fixed.
				 *
				 * We'll revisit this later to pass
				 * s->args_size to the BPF augmenter (now
				 * tools/perf/examples/bpf/augmented_raw_syscalls.c),
				 * so that it copies only what we need for each
				 * syscall, like what happens when we use
				 * syscalls:sys_enter_NAME, so that we reduce
				 * the kernel/userspace traffic to just what is
				 * needed for each syscall.
				 */
				if (trace.raw_augmented_syscalls)
					trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
				evsel__init_augmented_syscall_tp_ret(evsel);
				evsel->handler = trace__sys_exit;
			}
		}
	}

	if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
		return trace__record(&trace, argc-1, &argv[1]);

	/* Using just --errno-summary will trigger --summary */
	if (trace.errno_summary && !trace.summary && !trace.summary_only)
		trace.summary_only = true;

	/* summary_only implies summary option, but don't overwrite summary if set */
	if (trace.summary_only)
		trace.summary = trace.summary_only;

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
		if (err < 0) {
			perror("failed to create output file");
			goto out;
		}
	}

	err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
	if (err)
		goto out_close;

	err = target__validate(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	err = target__parse_uid(&trace.opts.target);
	if (err) {
		target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);
		goto out_close;
	}

	if (!argc && target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

	if (input_name)
		err = trace__replay(&trace);
	else
		err = trace__run(&trace, argc, argv);

out_close:
	if (output_name != NULL)
		fclose(trace.output);
out:
	trace__exit(&trace);
	return err;
}