/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace-like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

/*
 * strtoul: Go from a string to a value, i.e.
 * for msr: MSR_FS_BASE to 0xc0000100
 */
struct syscall_arg_fmt {
	size_t	   (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	bool	   (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val);
	unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
	void	   *parm;
	const char *name;
	u16	   nr_entries; // for arrays
	bool	   show_zero;
};

struct syscall_fmt {
	const char *name;
	const char *alias;
	struct {
		const char *sys_enter,
			   *sys_exit;
	}	   bpf_prog_name;
	struct syscall_arg_fmt arg[6];
	u8	   nr_args;
	bool	   errpid;
	bool	   timeout;
	bool	   hexret;
};

struct trace {
	struct perf_tool	tool;
	struct syscalltbl	*sctbl;
	struct {
		struct syscall  *table;
		struct bpf_map  *map;
		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
			struct bpf_map  *sys_enter,
					*sys_exit;
		}		prog_array;
		struct {
			struct evsel *sys_enter,
				     *sys_exit,
				     *augmented;
		}		events;
		struct bpf_program *unaugmented_prog;
	} syscalls;
	struct {
		struct bpf_map *map;
	} dump;
	struct record_opts	opts;
	struct evlist		*evlist;
	struct machine		*host;
	struct thread		*current;
	struct bpf_object	*bpf_obj;
	struct cgroup		*cgroup;
	u64			base_time;
	FILE			*output;
	unsigned long		nr_events;
	unsigned long		nr_events_printed;
	unsigned long		max_events;
	struct evswitch		evswitch;
	struct strlist		*ev_qualifier;
	struct {
		size_t		nr;
		int		*entries;
	}			ev_qualifier_ids;
	struct {
		size_t		nr;
		pid_t		*entries;
		struct bpf_map  *map;
	}			filter_pids;
	double			duration_filter;
	double			runtime_ms;
	struct {
		u64		vfs_getname,
				proc_getname;
	} stats;
	unsigned int		max_stack;
	unsigned int		min_stack;
	int			raw_augmented_syscalls_args_size;
	bool			raw_augmented_syscalls;
	bool			fd_path_disabled;
	bool			sort_events;
	bool			not_ev_qualifier;
	bool			live;
	bool			full_time;
	bool			sched;
	bool			multiple_threads;
	bool			summary;
	bool			summary_only;
	bool			errno_summary;
	bool			failure_only;
	bool			show_comm;
	bool			print_sample;
	bool			show_tool_stats;
	bool			trace_syscalls;
	bool			libtraceevent_print;
	bool			kernel_syscallchains;
	s16			args_alignment;
	bool			show_tstamp;
	bool			show_duration;
	bool			show_zeros;
	bool			show_arg_names;
	bool			show_string_prefix;
	bool			force;
	bool			vfs_getname;
	int			trace_pgfaults;
	char			*perfconfig_events;
	struct {
		struct ordered_events	data;
		u64			last;
	} oe;
};

struct tp_field {
	int offset;
	union {
		u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
		void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
	};
};

#define TP_UINT_FIELD(bits) \
static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return value;  \
}

TP_UINT_FIELD(8);
TP_UINT_FIELD(16);
TP_UINT_FIELD(32);
TP_UINT_FIELD(64);

#define TP_UINT_FIELD__SWAPPED(bits) \
static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
{ \
	u##bits value; \
	memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
	return bswap_##bits(value);\
}

TP_UINT_FIELD__SWAPPED(16);
TP_UINT_FIELD__SWAPPED(32);
TP_UINT_FIELD__SWAPPED(64);
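
/*
 * A sketch of how these accessors end up being used (names as elsewhere in
 * this file): once a tp_field is initialized from a tracepoint format field,
 * reading the value out of a sample is a single indirect call, e.g.:
 *
 *	struct syscall_tp *sc = __evsel__syscall_tp(evsel);
 *	u64 id = sc->id.integer(&sc->id, sample);
 *
 * The swapped variants are picked once, at init time, when evsel->needs_swap
 * is set, so readers never have to check the sample's byte order again.
 */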
static int
__tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
{
	field->offset = offset;

	switch (size) {
	case 1:
		field->integer = tp_field__u8;
		break;
	case 2:
		field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
		break;
	case 4:
		field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
		break;
	case 8:
		field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
		break;
	default:
		return -1;
	}

	return 0;
}

static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap)
{
	return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
}

static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
{
	return sample->raw_data + field->offset;
}

static int __tp_field__init_ptr(struct tp_field *field, int offset)
{
	field->offset = offset;
	field->pointer = tp_field__ptr;
	return 0;
}

static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field)
{
	return __tp_field__init_ptr(field, format_field->offset);
}

struct syscall_tp {
	struct tp_field id;
	union {
		struct tp_field args, ret;
	};
};

/*
 * The evsel->priv as used by 'perf trace'
 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME
 * fmt: for all the other tracepoints
 */
struct evsel_trace {
	struct syscall_tp	sc;
	struct syscall_arg_fmt	*fmt;
};

static struct evsel_trace *evsel_trace__new(void)
{
	return zalloc(sizeof(struct evsel_trace));
}

static void evsel_trace__delete(struct evsel_trace *et)
{
	if (et == NULL)
		return;

	zfree(&et->fmt);
	free(et);
}

/*
 * Used with raw_syscalls:sys_{enter,exit} and with the
 * syscalls:sys_{enter,exit}_SYSCALL tracepoints
 */
static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return &et->sc;
}

static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel)
{
	if (evsel->priv == NULL) {
		evsel->priv = evsel_trace__new();
		if (evsel->priv == NULL)
			return NULL;
	}

	return __evsel__syscall_tp(evsel);
}

/*
 * Used with all the other tracepoints.
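 *
 * A sketch of the intended use, for some non-syscall tracepoint:
 *
 *	struct evsel *evsel = evsel__newtp("sched", "sched_switch");
 *	struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
 *	// fmt then has one entry per field in the tracepoint's format file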
 */
static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	return et->fmt;
}

static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel)
{
	struct evsel_trace *et = evsel->priv;

	if (evsel->priv == NULL) {
		et = evsel->priv = evsel_trace__new();

		if (et == NULL)
			return NULL;
	}

	if (et->fmt == NULL) {
		et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt));
		if (et->fmt == NULL)
			goto out_delete;
	}

	return __evsel__syscall_arg_fmt(evsel);

out_delete:
	evsel_trace__delete(evsel->priv);
	evsel->priv = NULL;
	return NULL;
}

static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_uint(field, format_field, evsel->needs_swap);
}

#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_uint_field(evsel, &sc->name, #name); })

static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
	struct tep_format_field *format_field = evsel__field(evsel, name);

	if (format_field == NULL)
		return -1;

	return tp_field__init_ptr(field, format_field);
}

#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
	({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
	   evsel__init_tp_ptr_field(evsel, &sc->name, #name); })

static void evsel__delete_priv(struct evsel *evsel)
{
	zfree(&evsel->priv);
	evsel__delete(evsel);
}

static int evsel__init_syscall_tp(struct evsel *evsel)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
		    evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
			return -ENOENT;
		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
	struct syscall_tp *sc = evsel__syscall_tp(evsel);

	if (sc != NULL) {
		struct tep_format_field *syscall_id = evsel__field(tp, "id");
		if (syscall_id == NULL)
			syscall_id = evsel__field(tp, "__syscall_nr");
		if (syscall_id == NULL ||
		    __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
			return -EINVAL;

		return 0;
	}

	return -ENOMEM;
}

static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}

static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
	struct syscall_tp *sc = __evsel__syscall_tp(evsel);

	return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}

static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
	if (evsel__syscall_tp(evsel) != NULL) {
		if (perf_evsel__init_sc_tp_uint_field(evsel, id))
			return -ENOENT;

		evsel->handler = handler;
		return 0;
	}

	return -ENOMEM;
}
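
/*
 * A sketch of how this constructor is meant to be called, one evsel per
 * direction, with a handler like trace__sys_enter (defined later in this
 * file):
 *
 *	struct evsel *sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
 */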
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
	struct evsel *evsel = evsel__newtp("raw_syscalls", direction);

	/* older kernels (e.g. RHEL6) use syscalls:{enter,exit} */
	if (IS_ERR(evsel))
		evsel = evsel__newtp("syscalls", direction);

	if (IS_ERR(evsel))
		return NULL;

	if (evsel__init_raw_syscall_tp(evsel, handler))
		goto out_delete;

	return evsel;

out_delete:
	evsel__delete_priv(evsel);
	return NULL;
}

#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \
	   fields->name.pointer(&fields->name, sample); })

size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_suffix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : "");
}

size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	int idx = val - sa->offset;

	if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
		size_t printed = scnprintf(bf, size, intfmt, val);
		if (show_prefix)
			printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
		return printed;
	}

	return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}

static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
						const char *intfmt,
						struct syscall_arg *arg)
{
	return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}

static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray

bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarray__strtoul_flags(arg->parm, bf, size, ret);
}

bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret)
{
	return strarrays__strtoul(arg->parm, bf, size, ret);
}

size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg)
{
	return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val);
}

size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
	size_t printed;
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];
		int idx = val - sa->offset;

		if (idx >= 0 && idx < sa->nr_entries) {
			if (sa->entries[idx] == NULL)
				break;
			return scnprintf(bf, size, "%s%s", show_prefix ?
					 sa->prefix : "", sa->entries[idx]);
		}
	}

	printed = scnprintf(bf, size, intfmt, val);
	if (show_prefix)
		printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
	return printed;
}

bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sa->nr_entries; ++i) {
		if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') {
			*ret = sa->offset + i;
			return true;
		}
	}

	return false;
}

bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret)
{
	u64 val = 0;
	char *tok = bf, *sep, *end;

	*ret = 0;

	while (size != 0) {
		int toklen = size;

		sep = memchr(tok, '|', size);
		if (sep != NULL) {
			size -= sep - tok + 1;

			end = sep - 1;
			while (end > tok && isspace(*end))
				--end;

			toklen = end - tok + 1;
		}

		while (isspace(*tok))
			++tok;

		if (isalpha(*tok) || *tok == '_') {
			if (!strarray__strtoul(sa, tok, toklen, &val))
				return false;
		} else {
			bool is_hexa = tok[0] == '0' && (tok[1] == 'x' || tok[1] == 'X');

			val = strtoul(tok, NULL, is_hexa ? 16 : 0);
		}

		*ret |= (1 << (val - 1));

		if (sep == NULL)
			break;
		tok = sep + 1;
	}

	return true;
}

bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret)
{
	int i;

	for (i = 0; i < sas->nr_entries; ++i) {
		struct strarray *sa = sas->entries[i];

		if (strarray__strtoul(sa, bf, size, ret))
			return true;
	}

	return false;
}

size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
					struct syscall_arg *arg)
{
	return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}

#ifndef AT_FDCWD
#define AT_FDCWD	-100
#endif

static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
					   struct syscall_arg *arg)
{
	int fd = arg->val;
	const char *prefix = "AT_FD";

	if (fd == AT_FDCWD)
		return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");

	return syscall_arg__scnprintf_fd(bf, size, arg);
}

#define SCA_FDAT syscall_arg__scnprintf_fd_at

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd

size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%#lx", arg->val);
}

size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
{
	if (arg->val == 0)
		return scnprintf(bf, size, "NULL");
	return syscall_arg__scnprintf_hex(bf, size, arg);
}

size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%d", arg->val);
}

size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg)
{
	return scnprintf(bf, size, "%ld", arg->val);
}

static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg)
{
	// XXX Hey, maybe for sched:sched_switch prev/next comm fields we can
	// fill missing comms using thread__set_comm()...
	// here or in a special syscall_arg__scnprintf_pid_sched_tp...
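	/*
	 * "%-.*s" caps how many bytes are read from arg->val: use the fixed
	 * array length from the format file when we have it (fmt->nr_entries,
	 * e.g. 16 for a comm[16] field), falling back to arg->len via the
	 * GNU "?:" extension.
	 */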
	return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val);
}

#define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array

static const char *bpf_cmd[] = {
	"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
	"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");

static const char *fsmount_flags[] = {
	[1] = "CLOEXEC",
};
static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_");

#include "trace/beauty/generated/fsconfig_arrays.c"

static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_");

static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);

static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
static DEFINE_STRARRAY(itimers, "ITIMER_");

static const char *keyctl_options[] = {
	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");

static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
"DATA",
#endif
#ifdef SEEK_HOLE
"HOLE",
#endif
};
static DEFINE_STRARRAY(whences, "SEEK_");

static const char *fcntl_cmds[] = {
	"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
	"SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64",
	"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
	"GETOWNER_UIDS",
};
static DEFINE_STRARRAY(fcntl_cmds, "F_");

static const char *fcntl_linux_specific_cmds[] = {
	"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
	"SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS",
	"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};

static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);

static struct strarray *fcntl_cmds_arrays[] = {
	&strarray__fcntl_cmds,
	&strarray__fcntl_linux_specific_cmds,
};

static DEFINE_STRARRAYS(fcntl_cmds_arrays);

static const char *rlimit_resources[] = {
	"CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
	"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
	"RTTIME",
};
static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");

static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
static DEFINE_STRARRAY(sighow, "SIG_");

static const char *clockid[] = {
	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
static DEFINE_STRARRAY(clockid, "CLOCK_");

static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
						 struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *suffix = "_OK";
	size_t printed = 0;
	int mode = arg->val;

	if (mode == F_OK) /* 0 */
		return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define	P_MODE(n) \
	if (mode & n##_OK) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? \
				     suffix : ""); \
		mode &= ~n##_OK; \
	}

	P_MODE(R);
	P_MODE(W);
	P_MODE(X);
#undef P_MODE

	if (mode)
		printed += scnprintf(bf + printed, size - printed, "|%#x", mode);

	return printed;
}

#define SCA_ACCMODE syscall_arg__scnprintf_access_mode

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg);

#define SCA_FILENAME syscall_arg__scnprintf_filename

static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
						struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "O_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & O_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~O_##n; \
	}

	P_FLAG(CLOEXEC);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

	return printed;
}

#define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags

#ifndef GRND_NONBLOCK
#define GRND_NONBLOCK	0x0001
#endif
#ifndef GRND_RANDOM
#define GRND_RANDOM	0x0002
#endif

static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
						     struct syscall_arg *arg)
{
	bool show_prefix = arg->show_string_prefix;
	const char *prefix = "GRND_";
	int printed = 0, flags = arg->val;

#define	P_FLAG(n) \
	if (flags & GRND_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
		flags &= ~GRND_##n; \
	}

	P_FLAG(RANDOM);
	P_FLAG(NONBLOCK);
#undef P_FLAG

	if (flags)
		printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ?
"|" : "", flags); 873 874 return printed; 875 } 876 877 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 878 879 #define STRARRAY(name, array) \ 880 { .scnprintf = SCA_STRARRAY, \ 881 .strtoul = STUL_STRARRAY, \ 882 .parm = &strarray__##array, } 883 884 #define STRARRAY_FLAGS(name, array) \ 885 { .scnprintf = SCA_STRARRAY_FLAGS, \ 886 .strtoul = STUL_STRARRAY_FLAGS, \ 887 .parm = &strarray__##array, } 888 889 #include "trace/beauty/arch_errno_names.c" 890 #include "trace/beauty/eventfd.c" 891 #include "trace/beauty/futex_op.c" 892 #include "trace/beauty/futex_val3.c" 893 #include "trace/beauty/mmap.c" 894 #include "trace/beauty/mode_t.c" 895 #include "trace/beauty/msg_flags.c" 896 #include "trace/beauty/open_flags.c" 897 #include "trace/beauty/perf_event_open.c" 898 #include "trace/beauty/pid.c" 899 #include "trace/beauty/sched_policy.c" 900 #include "trace/beauty/seccomp.c" 901 #include "trace/beauty/signum.c" 902 #include "trace/beauty/socket_type.c" 903 #include "trace/beauty/waitid_options.c" 904 905 static struct syscall_fmt syscall_fmts[] = { 906 { .name = "access", 907 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 908 { .name = "arch_prctl", 909 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ }, 910 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, }, 911 { .name = "bind", 912 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 913 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, 914 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 915 { .name = "bpf", 916 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, }, 917 { .name = "brk", .hexret = true, 918 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, }, 919 { .name = "clock_gettime", 920 .arg = { [0] = STRARRAY(clk_id, clockid), }, }, 921 { .name = "clone", .errpid = true, .nr_args = 5, 922 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, }, 923 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, }, 924 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, }, 925 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, }, 926 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, }, 927 { .name = "close", 928 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, }, 929 { .name = "connect", 930 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 931 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, 932 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 933 { .name = "epoll_ctl", 934 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, 935 { .name = "eventfd2", 936 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, 937 { .name = "fchmodat", 938 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 939 { .name = "fchownat", 940 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 941 { .name = "fcntl", 942 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */ 943 .strtoul = STUL_STRARRAYS, 944 .parm = &strarrays__fcntl_cmds_arrays, 945 .show_zero = true, }, 946 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, }, 947 { .name = "flock", 948 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, }, 949 { .name = "fsconfig", 950 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, }, 951 { .name = "fsmount", 952 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags), 953 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, }, 954 { .name = "fspick", 955 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 956 [1] = { .scnprintf = SCA_FILENAME, /* path */ }, 957 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, }, 958 { .name = 
"fstat", .alias = "newfstat", }, 959 { .name = "fstatat", .alias = "newfstatat", }, 960 { .name = "futex", 961 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, 962 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, }, 963 { .name = "futimesat", 964 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 965 { .name = "getitimer", 966 .arg = { [0] = STRARRAY(which, itimers), }, }, 967 { .name = "getpid", .errpid = true, }, 968 { .name = "getpgid", .errpid = true, }, 969 { .name = "getppid", .errpid = true, }, 970 { .name = "getrandom", 971 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, }, 972 { .name = "getrlimit", 973 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 974 { .name = "gettid", .errpid = true, }, 975 { .name = "ioctl", 976 .arg = { 977 #if defined(__i386__) || defined(__x86_64__) 978 /* 979 * FIXME: Make this available to all arches. 980 */ 981 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ }, 982 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 983 #else 984 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 985 #endif 986 { .name = "kcmp", .nr_args = 5, 987 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, }, 988 [1] = { .name = "pid2", .scnprintf = SCA_PID, }, 989 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, }, 990 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, }, 991 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, }, 992 { .name = "keyctl", 993 .arg = { [0] = STRARRAY(option, keyctl_options), }, }, 994 { .name = "kill", 995 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 996 { .name = "linkat", 997 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 998 { .name = "lseek", 999 .arg = { [2] = STRARRAY(whence, whences), }, }, 1000 { .name = "lstat", .alias = "newlstat", }, 1001 { .name = "madvise", 1002 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1003 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, }, 1004 { .name = "mkdirat", 1005 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1006 { .name = "mknodat", 1007 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1008 { .name = "mmap", .hexret = true, 1009 /* The standard mmap maps to old_mmap on s390x */ 1010 #if defined(__s390x__) 1011 .alias = "old_mmap", 1012 #endif 1013 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, 1014 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ 1015 .strtoul = STUL_STRARRAY_FLAGS, 1016 .parm = &strarray__mmap_flags, }, 1017 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, }, 1018 { .name = "mount", 1019 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ }, 1020 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */ 1021 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, }, 1022 { .name = "move_mount", 1023 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ }, 1024 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ }, 1025 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ }, 1026 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ }, 1027 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, }, 1028 { .name = "mprotect", 1029 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1030 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, }, 1031 { .name = "mq_unlink", 1032 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, }, 1033 { .name = "mremap", .hexret = true, 1034 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, }, 1035 { .name = "name_to_handle_at", 1036 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1037 { .name = 
"newfstatat", 1038 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1039 { .name = "open", 1040 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1041 { .name = "open_by_handle_at", 1042 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1043 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1044 { .name = "openat", 1045 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1046 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1047 { .name = "perf_event_open", 1048 .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ }, 1049 [3] = { .scnprintf = SCA_FD, /* group_fd */ }, 1050 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, }, 1051 { .name = "pipe2", 1052 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, }, 1053 { .name = "pkey_alloc", 1054 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, }, 1055 { .name = "pkey_free", 1056 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, }, 1057 { .name = "pkey_mprotect", 1058 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1059 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, 1060 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, }, 1061 { .name = "poll", .timeout = true, }, 1062 { .name = "ppoll", .timeout = true, }, 1063 { .name = "prctl", 1064 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ 1065 .strtoul = STUL_STRARRAY, 1066 .parm = &strarray__prctl_options, }, 1067 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ }, 1068 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, }, 1069 { .name = "pread", .alias = "pread64", }, 1070 { .name = "preadv", .alias = "pread", }, 1071 { .name = "prlimit64", 1072 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, }, 1073 { .name = "pwrite", .alias = "pwrite64", }, 1074 { .name = "readlinkat", 1075 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1076 { .name = "recvfrom", 1077 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1078 { .name = "recvmmsg", 1079 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1080 { .name = "recvmsg", 1081 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1082 { .name = "renameat", 1083 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1084 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, }, 1085 { .name = "renameat2", 1086 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1087 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, 1088 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, }, 1089 { .name = "rt_sigaction", 1090 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1091 { .name = "rt_sigprocmask", 1092 .arg = { [0] = STRARRAY(how, sighow), }, }, 1093 { .name = "rt_sigqueueinfo", 1094 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1095 { .name = "rt_tgsigqueueinfo", 1096 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1097 { .name = "sched_setscheduler", 1098 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, }, 1099 { .name = "seccomp", 1100 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ }, 1101 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, }, 1102 { .name = "select", .timeout = true, }, 1103 { .name = "sendfile", .alias = "sendfile64", }, 1104 { .name = "sendmmsg", 1105 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1106 { .name = "sendmsg", 1107 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1108 { .name = "sendto", 1109 .arg = { [3] = { .scnprintf = 
		   [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, },
	{ .name	    = "set_tid_address", .errpid = true, },
	{ .name	    = "setitimer",
	  .arg = { [0] = STRARRAY(which, itimers), }, },
	{ .name	    = "setrlimit",
	  .arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
	{ .name	    = "socket",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "socketpair",
	  .arg = { [0] = STRARRAY(family, socket_families),
		   [1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
		   [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, },
	{ .name	    = "stat", .alias = "newstat", },
	{ .name	    = "statx",
	  .arg = { [0] = { .scnprintf = SCA_FDAT,	 /* fdat */ },
		   [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ },
		   [3] = { .scnprintf = SCA_STATX_MASK,	 /* mask */ }, }, },
	{ .name	    = "swapoff",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "swapon",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, },
	{ .name	    = "symlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "sync_file_range",
	  .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, },
	{ .name	    = "tgkill",
	  .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "tkill",
	  .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
	{ .name	    = "umount2", .alias = "umount",
	  .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
	{ .name	    = "uname", .alias = "newuname", },
	{ .name	    = "unlinkat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
	{ .name	    = "utimensat",
	  .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, },
	{ .name	    = "wait4", .errpid = true,
	  .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
	{ .name	    = "waitid", .errpid = true,
	  .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
};

static int syscall_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
}

static struct syscall_fmt *syscall_fmt__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find(syscall_fmts, nmemb, name);
}

static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias)
{
	int i;

	for (i = 0; i < nmemb; ++i) {
		if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0)
			return &fmts[i];
	}

	return NULL;
}

static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
{
	const int nmemb = ARRAY_SIZE(syscall_fmts);
	return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias);
}

/*
 * is_exit: is this "exit" or "exit_group"?
 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know the duration of a syscall, for instance, when we start a
 * session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, " ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
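 *
 *                A sketch: the fcntl 'cmd' beautifier can do
 *                syscall_arg__set_ret_scnprintf(arg, SCA_FD) for F_DUPFD,
 *                so the returned value is printed as a file descriptor,
 *                with its resolved path, instead of as a plain integer.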
 */
struct thread_trace {
	u64		  entry_time;
	bool		  entry_pending;
	unsigned long	  nr_events;
	unsigned long	  pfmaj, pfmin;
	char		  *entry_str;
	double		  runtime_ms;
	size_t		  (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
	struct {
		unsigned long ptr;
		short int     entry_str_pos;
		bool	      pending_open;
		unsigned int  namelen;
		char	      *name;
	} filename;
	struct {
		int	      max;
		struct file   *table;
	} files;

	struct intlist *syscall_stats;
};

static struct thread_trace *thread_trace__new(void)
{
	struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));

	if (ttrace) {
		ttrace->files.max = -1;
		ttrace->syscall_stats = intlist__new(NULL);
	}

	return ttrace;
}

static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
	struct thread_trace *ttrace;

	if (thread == NULL)
		goto fail;

	if (thread__priv(thread) == NULL)
		thread__set_priv(thread, thread_trace__new());

	if (thread__priv(thread) == NULL)
		goto fail;

	ttrace = thread__priv(thread);
	++ttrace->nr_events;

	return ttrace;
fail:
	color_fprintf(fp, PERF_COLOR_RED,
		      "WARNING: not enough memory, dropping samples!\n");
	return NULL;
}


void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
				    size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg))
{
	struct thread_trace *ttrace = thread__priv(arg->thread);

	ttrace->ret_scnprintf = ret_scnprintf;
}

#define TRACE_PFMAJ		(1 << 0)
#define TRACE_PFMIN		(1 << 1)

static const size_t trace__entry_str_size = 2048;

static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
{
	if (fd < 0)
		return NULL;

	if (fd > ttrace->files.max) {
		struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));

		if (nfiles == NULL)
			return NULL;

		if (ttrace->files.max != -1) {
			memset(nfiles + ttrace->files.max + 1, 0,
			       (fd - ttrace->files.max) * sizeof(struct file));
		} else {
			memset(nfiles, 0, (fd + 1) * sizeof(struct file));
		}

		ttrace->files.table = nfiles;
		ttrace->files.max   = fd;
	}

	return ttrace->files.table + fd;
}

struct file *thread__files_entry(struct thread *thread, int fd)
{
	return thread_trace__files_entry(thread__priv(thread), fd);
}

static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
{
	struct thread_trace *ttrace = thread__priv(thread);
	struct file *file = thread_trace__files_entry(ttrace, fd);

	if (file != NULL) {
		struct stat st;
		if (stat(pathname, &st) == 0)
			file->dev_maj = major(st.st_rdev);
		file->pathname = strdup(pathname);
		if (file->pathname)
			return 0;
	}

	return -1;
}

static int thread__read_fd_path(struct thread *thread, int fd)
{
	char linkname[PATH_MAX], pathname[PATH_MAX];
	struct stat st;
	int ret;

	if (thread->pid_ == thread->tid) {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/fd/%d", thread->pid_, fd);
	} else {
		scnprintf(linkname, sizeof(linkname),
			  "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
	}

	if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
		return -1;

	ret = readlink(linkname, pathname, sizeof(pathname));

	if (ret < 0 || ret > st.st_size)
		return -1;

	pathname[ret] = '\0';
	return trace__set_fd_pathname(thread, fd, pathname);
}

static const char *thread__fd_path(struct thread *thread, int fd,
				   struct trace *trace)
{
	struct thread_trace *ttrace = thread__priv(thread);

	if (ttrace == NULL || trace->fd_path_disabled)
		return NULL;

	if (fd < 0)
		return NULL;

	if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) {
		if (!trace->live)
			return NULL;
		++trace->stats.proc_getname;
		if (thread__read_fd_path(thread, fd))
			return NULL;
	}

	return ttrace->files.table[fd].pathname;
}

size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = scnprintf(bf, size, "%d", fd);
	const char *path = thread__fd_path(arg->thread, fd, arg->trace);

	if (path)
		printed += scnprintf(bf + printed, size - printed, "<%s>", path);

	return printed;
}

size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size)
{
	size_t printed = scnprintf(bf, size, "%d", fd);
	struct thread *thread = machine__find_thread(trace->host, pid, pid);

	if (thread) {
		const char *path = thread__fd_path(thread, fd, trace);

		if (path)
			printed += scnprintf(bf + printed, size - printed, "<%s>", path);

		thread__put(thread);
	}

	return printed;
}

static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	int fd = arg->val;
	size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
	struct thread_trace *ttrace = thread__priv(arg->thread);

	if (ttrace && fd >= 0 && fd <= ttrace->files.max)
		zfree(&ttrace->files.table[fd].pathname);

	return printed;
}

static void thread__set_filename_pos(struct thread *thread, const char *bf,
				     unsigned long ptr)
{
	struct thread_trace *ttrace = thread__priv(thread);

	ttrace->filename.ptr = ptr;
	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
}

static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size)
{
	struct augmented_arg *augmented_arg = arg->augmented.args;
	size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
	/*
	 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls
	 * we would have two strings, each prefixed by its size.
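	 *
	 * A sketch of the payload layout this walks, two augmented args back
	 * to back, roughly as the BPF program copies them:
	 *
	 *	{ int size; int err; char value[size]; }  // oldname
	 *	{ int size; int err; char value[size]; }  // newname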
	 */
	int consumed = sizeof(*augmented_arg) + augmented_arg->size;

	arg->augmented.args = ((void *)arg->augmented.args) + consumed;
	arg->augmented.size -= consumed;

	return printed;
}

static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	unsigned long ptr = arg->val;

	if (arg->augmented.args)
		return syscall_arg__scnprintf_augmented_string(arg, bf, size);

	if (!arg->trace->vfs_getname)
		return scnprintf(bf, size, "%#x", ptr);

	thread__set_filename_pos(arg->thread, bf, ptr);
	return 0;
}

static bool trace__filter_duration(struct trace *trace, double t)
{
	return t < (trace->duration_filter * NSEC_PER_MSEC);
}

static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;

	return fprintf(fp, "%10.3f ", ts);
}

/*
 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are
 * using ttrace->entry_time for a thread that receives a sys_exit without
 * first having received a sys_enter ("poll" issued before tracing session
 * starts, lost sys_enter exit due to ring buffer overflow).
 */
static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
{
	if (tstamp > 0)
		return __trace__fprintf_tstamp(trace, tstamp, fp);

	return fprintf(fp, " ? ");
}

static bool done	= false;
static bool interrupted = false;

static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}

static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
{
	size_t printed = 0;

	if (trace->multiple_threads) {
		if (trace->show_comm)
			printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
		printed += fprintf(fp, "%d ", thread->tid);
	}

	return printed;
}

static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
	size_t printed = 0;

	if (trace->show_tstamp)
		printed = trace__fprintf_tstamp(trace, tstamp, fp);
	if (trace->show_duration)
		printed += fprintf_duration(duration, duration_calculated, fp);
	return printed + trace__fprintf_comm_tid(trace, thread, fp);
}

static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event, struct perf_sample *sample)
{
	int ret = 0;

	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event, sample);
		break;
	default:
		ret = machine__process_event(machine, event, sample);
		break;
	}

	return ret;
}

static int trace__tool_process(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct trace *trace = container_of(tool, struct trace, tool);
	return trace__process_event(trace, machine, event, sample);
}

static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;

	if (machine->kptr_restrict_warned)
		return NULL;

	if (symbol_conf.kptr_restrict) {
		pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			   "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
			   "Kernel samples will not be resolved.\n");
		machine->kptr_restrict_warned = true;
		return NULL;
	}

	return machine__resolve_kernel_addr(vmachine, addrp, modp);
}

static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
{
	int err = symbol__init(NULL);

	if (err)
		return err;

	trace->host = machine__new_host();
	if (trace->host == NULL)
		return -ENOMEM;

	err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr);
	if (err < 0)
		goto out;

	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
					    evlist->core.threads, trace__tool_process, false,
					    1);
out:
	if (err)
		symbol__exit();

	return err;
}

static void trace__symbols__exit(struct trace *trace)
{
	machine__exit(trace->host);
	trace->host = NULL;

	symbol__exit();
}

static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
	int idx;

	if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
		nr_args = sc->fmt->nr_args;

	sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
	if (sc->arg_fmt == NULL)
		return -1;

	for (idx = 0; idx < nr_args; ++idx) {
		if (sc->fmt)
			sc->arg_fmt[idx] = sc->fmt->arg[idx];
	}

	sc->nr_args = nr_args;
	return 0;
}

static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = {
	{ .name = "msr",    .scnprintf = SCA_X86_MSR,	      .strtoul = STUL_X86_MSR, },
	{ .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, },
};

static int syscall_arg_fmt__cmp(const void *name, const void *fmtp)
{
	const struct syscall_arg_fmt *fmt = fmtp;
	return strcmp(name, fmt->name);
}

static struct syscall_arg_fmt *
__syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name)
{
	return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp);
}

static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name)
{
	const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name);
	return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name);
}

static struct tep_format_field *
syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field)
{
	struct tep_format_field *last_field = NULL;
	int len;

	for (; field; field = field->next, ++arg) {
		last_field = field;

		if (arg->scnprintf)
			continue;

		len = strlen(field->name);

		if (strcmp(field->type, "const char *") == 0 &&
		    ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) ||
		     strstr(field->name, "path") != NULL))
			arg->scnprintf = SCA_FILENAME;
		else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
			arg->scnprintf = SCA_PTR;
		else if (strcmp(field->type, "pid_t") == 0)
			arg->scnprintf = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			arg->scnprintf = SCA_MODE_T;
		else if ((field->flags & TEP_FIELD_IS_ARRAY) && strstr(field->type, "char")) {
			arg->scnprintf = SCA_CHAR_ARRAY;
			arg->nr_entries = field->arraylen;
		} else if ((strcmp(field->type, "int") == 0 ||
			    strcmp(field->type, "unsigned int") == 0 ||
			    strcmp(field->type, "long") == 0) &&
			   len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 * 65 int
			 * 23 unsigned int
			 *  7 unsigned long
			 */
			arg->scnprintf = SCA_FD;
		} else {
			struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name);

			if (fmt) {
				arg->scnprintf = fmt->scnprintf;
				arg->strtoul   = fmt->strtoul;
			}
		}
	}

	return last_field;
}

static int syscall__set_arg_fmts(struct syscall *sc)
{
	struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args);

	if (last_field)
		sc->args_size = last_field->offset + last_field->size;

	return 0;
}

static int trace__read_syscall_info(struct trace *trace, int id)
{
	char tp_name[128];
	struct syscall *sc;
	const char *name = syscalltbl__name(trace->sctbl, id);

#ifdef HAVE_SYSCALL_TABLE_SUPPORT
	if (trace->syscalls.table == NULL) {
		trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
		if (trace->syscalls.table == NULL)
			return -ENOMEM;
	}
#else
	if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
		// When using libaudit we don't know beforehand what is the max syscall id
		struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (table == NULL)
			return -ENOMEM;

		// Need to memset from offset 0 and +1 members if brand new
		if (trace->syscalls.table == NULL)
			memset(table, 0, (id + 1) * sizeof(*sc));
		else
			memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));

		trace->syscalls.table = table;
		trace->sctbl->syscalls.max_id = id;
	}
#endif
	sc = trace->syscalls.table + id;
	if (sc->nonexistent)
		return 0;

	if (name == NULL) {
		sc->nonexistent = true;
		return 0;
	}

	sc->name = name;
	sc->fmt  = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);
	}

	if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
		return -ENOMEM;

	if (IS_ERR(sc->tp_format))
		return PTR_ERR(sc->tp_format);

	sc->args = sc->tp_format->format.fields;
	/*
	 * Check and discard the first field, '__syscall_nr' or 'nr', which
	 * holds the syscall number and is redundant here ('nr' is the name
	 * used by older kernels).
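	 *
	 * E.g. a sketch of the fields for syscalls:sys_enter_openat:
	 *
	 *	field:int __syscall_nr;		// skipped here
	 *	field:int dfd;
	 *	field:const char * filename;
	 *	field:int flags;
	 *	field:umode_t mode;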
1806 */ 1807 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { 1808 sc->args = sc->args->next; 1809 --sc->nr_args; 1810 } 1811 1812 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); 1813 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); 1814 1815 return syscall__set_arg_fmts(sc); 1816 } 1817 1818 static int evsel__init_tp_arg_scnprintf(struct evsel *evsel) 1819 { 1820 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 1821 1822 if (fmt != NULL) { 1823 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields); 1824 return 0; 1825 } 1826 1827 return -ENOMEM; 1828 } 1829 1830 static int intcmp(const void *a, const void *b) 1831 { 1832 const int *one = a, *another = b; 1833 1834 return *one - *another; 1835 } 1836 1837 static int trace__validate_ev_qualifier(struct trace *trace) 1838 { 1839 int err = 0; 1840 bool printed_invalid_prefix = false; 1841 struct str_node *pos; 1842 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); 1843 1844 trace->ev_qualifier_ids.entries = malloc(nr_allocated * 1845 sizeof(trace->ev_qualifier_ids.entries[0])); 1846 1847 if (trace->ev_qualifier_ids.entries == NULL) { 1848 fputs("Error:\tNot enough memory for allocating events qualifier ids\n", 1849 trace->output); 1850 err = -ENOMEM; 1851 goto out; 1852 } 1853 1854 strlist__for_each_entry(pos, trace->ev_qualifier) { 1855 const char *sc = pos->s; 1856 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; 1857 1858 if (id < 0) { 1859 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); 1860 if (id >= 0) 1861 goto matches; 1862 1863 if (!printed_invalid_prefix) { 1864 pr_debug("Skipping unknown syscalls: "); 1865 printed_invalid_prefix = true; 1866 } else { 1867 pr_debug(", "); 1868 } 1869 1870 pr_debug("%s", sc); 1871 continue; 1872 } 1873 matches: 1874 trace->ev_qualifier_ids.entries[nr_used++] = id; 1875 if (match_next == -1) 1876 continue; 1877 1878 while (1) { 1879 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); 1880 if (id < 0) 1881 break; 1882 if (nr_allocated == nr_used) { 1883 void *entries; 1884 1885 nr_allocated += 8; 1886 entries = realloc(trace->ev_qualifier_ids.entries, 1887 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); 1888 if (entries == NULL) { 1889 err = -ENOMEM; 1890 fputs("\nError:\tNot enough memory for parsing\n", trace->output); 1891 goto out_free; 1892 } 1893 trace->ev_qualifier_ids.entries = entries; 1894 } 1895 trace->ev_qualifier_ids.entries[nr_used++] = id; 1896 } 1897 } 1898 1899 trace->ev_qualifier_ids.nr = nr_used; 1900 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); 1901 out: 1902 if (printed_invalid_prefix) 1903 pr_debug("\n"); 1904 return err; 1905 out_free: 1906 zfree(&trace->ev_qualifier_ids.entries); 1907 trace->ev_qualifier_ids.nr = 0; 1908 goto out; 1909 } 1910 1911 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) 1912 { 1913 bool in_ev_qualifier; 1914 1915 if (trace->ev_qualifier_ids.nr == 0) 1916 return true; 1917 1918 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, 1919 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; 1920 1921 if (in_ev_qualifier) 1922 return !trace->not_ev_qualifier; 1923 1924 return trace->not_ev_qualifier; 1925 } 1926 1927 /* 1928 * args is to be interpreted as a series of longs but we need to handle 1929 * 8-byte unaligned accesses.
args points to raw_data within the event 1930 * and raw_data is guaranteed to be 8-byte unaligned because it is 1931 * preceded by raw_size which is a u32. So we need to copy args to a temp 1932 * variable to read it. Most notably this avoids extended load instructions 1933 * on unaligned addresses. 1934 */ 1935 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx) 1936 { 1937 unsigned long val; 1938 unsigned char *p = arg->args + sizeof(unsigned long) * idx; 1939 1940 memcpy(&val, p, sizeof(val)); 1941 return val; 1942 } 1943 1944 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, 1945 struct syscall_arg *arg) 1946 { 1947 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) 1948 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); 1949 1950 return scnprintf(bf, size, "arg%d: ", arg->idx); 1951 } 1952 1953 /* 1954 * Check if the value is in fact zero, i.e. mask whatever needs masking, such 1955 * as the mount 'flags' argument, where some magic flag needs ignoring, see the 1956 * comment in tools/perf/trace/beauty/mount_flags.c 1957 */ 1958 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) 1959 { 1960 if (fmt && fmt->mask_val) 1961 return fmt->mask_val(arg, val); 1962 1963 return val; 1964 } 1965 1966 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, 1967 struct syscall_arg *arg, unsigned long val) 1968 { 1969 if (fmt && fmt->scnprintf) { 1970 arg->val = val; 1971 if (fmt->parm) 1972 arg->parm = fmt->parm; 1973 return fmt->scnprintf(bf, size, arg); 1974 } 1975 return scnprintf(bf, size, "%ld", val); 1976 } 1977 1978 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, 1979 unsigned char *args, void *augmented_args, int augmented_args_size, 1980 struct trace *trace, struct thread *thread) 1981 { 1982 size_t printed = 0; 1983 unsigned long val; 1984 u8 bit = 1; 1985 struct syscall_arg arg = { 1986 .args = args, 1987 .augmented = { 1988 .size = augmented_args_size, 1989 .args = augmented_args, 1990 }, 1991 .idx = 0, 1992 .mask = 0, 1993 .trace = trace, 1994 .thread = thread, 1995 .show_string_prefix = trace->show_string_prefix, 1996 }; 1997 struct thread_trace *ttrace = thread__priv(thread); 1998 1999 /* 2000 * Things like fcntl will set this in its 'cmd' formatter to pick the 2001 * right formatter for the return value (an fd? file flags?), which is 2002 * not needed for syscalls that always return a given type, say an fd. 2003 */ 2004 ttrace->ret_scnprintf = NULL; 2005 2006 if (sc->args != NULL) { 2007 struct tep_format_field *field; 2008 2009 for (field = sc->args; field; 2010 field = field->next, ++arg.idx, bit <<= 1) { 2011 if (arg.mask & bit) 2012 continue; 2013 2014 arg.fmt = &sc->arg_fmt[arg.idx]; 2015 val = syscall_arg__val(&arg, arg.idx); 2016 /* 2017 * Some syscall args need masking, most don't and 2018 * return val untouched. 2019 */ 2020 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2021 2022 /* 2023 * Suppress this argument if its value is zero and 2024 * we don't have a string associated in a 2025 * strarray for it. 2026 */ 2027 if (val == 0 && 2028 !trace->show_zeros && 2029 !(sc->arg_fmt && 2030 (sc->arg_fmt[arg.idx].show_zero || 2031 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY || 2032 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) && 2033 sc->arg_fmt[arg.idx].parm)) 2034 continue; 2035 2036 printed += scnprintf(bf + printed, size - printed, "%s", printed ?
", " : ""); 2037 2038 if (trace->show_arg_names) 2039 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2040 2041 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2042 bf + printed, size - printed, &arg, val); 2043 } 2044 } else if (IS_ERR(sc->tp_format)) { 2045 /* 2046 * If we managed to read the tracepoint /format file, then we 2047 * may end up not having any args, like with gettid(), so only 2048 * print the raw args when we didn't manage to read it. 2049 */ 2050 while (arg.idx < sc->nr_args) { 2051 if (arg.mask & bit) 2052 goto next_arg; 2053 val = syscall_arg__val(&arg, arg.idx); 2054 if (printed) 2055 printed += scnprintf(bf + printed, size - printed, ", "); 2056 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2057 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2058 next_arg: 2059 ++arg.idx; 2060 bit <<= 1; 2061 } 2062 } 2063 2064 return printed; 2065 } 2066 2067 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2068 union perf_event *event, 2069 struct perf_sample *sample); 2070 2071 static struct syscall *trace__syscall_info(struct trace *trace, 2072 struct evsel *evsel, int id) 2073 { 2074 int err = 0; 2075 2076 if (id < 0) { 2077 2078 /* 2079 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2080 * before that, leaving at a higher verbosity level till that is 2081 * explained. Reproduced with plain ftrace with: 2082 * 2083 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2084 * grep "NR -1 " /t/trace_pipe 2085 * 2086 * After generating some load on the machine. 2087 */ 2088 if (verbose > 1) { 2089 static u64 n; 2090 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2091 id, evsel__name(evsel), ++n); 2092 } 2093 return NULL; 2094 } 2095 2096 err = -EINVAL; 2097 2098 #ifdef HAVE_SYSCALL_TABLE_SUPPORT 2099 if (id > trace->sctbl->syscalls.max_id) { 2100 #else 2101 if (id >= trace->sctbl->syscalls.max_id) { 2102 /* 2103 * With libaudit we don't know beforehand what is the max_id, 2104 * so we let trace__read_syscall_info() figure that out as we 2105 * go on reading syscalls. 
2106 */ 2107 err = trace__read_syscall_info(trace, id); 2108 if (err) 2109 #endif 2110 goto out_cant_read; 2111 } 2112 2113 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2114 (err = trace__read_syscall_info(trace, id)) != 0) 2115 goto out_cant_read; 2116 2117 if (trace->syscalls.table[id].name == NULL) { 2118 if (trace->syscalls.table[id].nonexistent) 2119 return NULL; 2120 goto out_cant_read; 2121 } 2122 2123 return &trace->syscalls.table[id]; 2124 2125 out_cant_read: 2126 if (verbose > 0) { 2127 char sbuf[STRERR_BUFSIZE]; 2128 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2129 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2130 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2131 fputs(" information\n", trace->output); 2132 } 2133 return NULL; 2134 } 2135 2136 struct syscall_stats { 2137 struct stats stats; 2138 u64 nr_failures; 2139 int max_errno; 2140 u32 *errnos; 2141 }; 2142 2143 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2144 int id, struct perf_sample *sample, long err, bool errno_summary) 2145 { 2146 struct int_node *inode; 2147 struct syscall_stats *stats; 2148 u64 duration = 0; 2149 2150 inode = intlist__findnew(ttrace->syscall_stats, id); 2151 if (inode == NULL) 2152 return; 2153 2154 stats = inode->priv; 2155 if (stats == NULL) { 2156 stats = malloc(sizeof(*stats)); 2157 if (stats == NULL) 2158 return; 2159 2160 stats->nr_failures = 0; 2161 stats->max_errno = 0; 2162 stats->errnos = NULL; 2163 init_stats(&stats->stats); 2164 inode->priv = stats; 2165 } 2166 2167 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2168 duration = sample->time - ttrace->entry_time; 2169 2170 update_stats(&stats->stats, duration); 2171 2172 if (err < 0) { 2173 ++stats->nr_failures; 2174 2175 if (!errno_summary) 2176 return; 2177 2178 err = -err; 2179 if (err > stats->max_errno) { 2180 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2181 2182 if (new_errnos) { 2183 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2184 } else { 2185 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2186 thread__comm_str(thread), thread->pid_, thread->tid); 2187 return; 2188 } 2189 2190 stats->errnos = new_errnos; 2191 stats->max_errno = err; 2192 } 2193 2194 ++stats->errnos[err - 1]; 2195 } 2196 } 2197 2198 static int trace__printf_interrupted_entry(struct trace *trace) 2199 { 2200 struct thread_trace *ttrace; 2201 size_t printed; 2202 int len; 2203 2204 if (trace->failure_only || trace->current == NULL) 2205 return 0; 2206 2207 ttrace = thread__priv(trace->current); 2208 2209 if (!ttrace->entry_pending) 2210 return 0; 2211 2212 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2213 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2214 2215 if (len < trace->args_alignment - 4) 2216 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2217 2218 printed += fprintf(trace->output, " ...\n"); 2219 2220 ttrace->entry_pending = false; 2221 ++trace->nr_events_printed; 2222 2223 return printed; 2224 } 2225 2226 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2227 struct perf_sample *sample, struct thread *thread) 2228 { 2229 int printed = 0; 2230 2231 if (trace->print_sample) { 2232 double ts = 
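/* sample->time comes from PERF_SAMPLE_TIME and is in nanoseconds, hence the NSEC_PER_MSEC division to show milliseconds */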
(double)sample->time / NSEC_PER_MSEC; 2233 2234 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", 2235 evsel__name(evsel), ts, 2236 thread__comm_str(thread), 2237 sample->pid, sample->tid, sample->cpu); 2238 } 2239 2240 return printed; 2241 } 2242 2243 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size) 2244 { 2245 void *augmented_args = NULL; 2246 /* 2247 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter 2248 * and there we get all 6 syscall args plus the tracepoint common fields 2249 * that get calculated at the start and the syscall_nr (another long). 2250 * So we check if that is the case and if so don't look just past 2251 * sc->args_size but past the full raw_syscalls:sys_enter payload, 2252 * which is fixed. 2253 * 2254 * We'll revisit this later to pass sc->args_size to the BPF augmenter 2255 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it 2256 * copies only what we need for each syscall, like what happens when we 2257 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace 2258 * traffic to just what is needed for each syscall. 2259 */ 2260 int args_size = raw_augmented_args_size ?: sc->args_size; 2261 2262 *augmented_args_size = sample->raw_size - args_size; 2263 if (*augmented_args_size > 0) 2264 augmented_args = sample->raw_data + args_size; 2265 2266 return augmented_args; 2267 } 2268 2269 static void syscall__exit(struct syscall *sc) 2270 { 2271 if (!sc) 2272 return; 2273 2274 free(sc->arg_fmt); 2275 } 2276 2277 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, 2278 union perf_event *event __maybe_unused, 2279 struct perf_sample *sample) 2280 { 2281 char *msg; 2282 void *args; 2283 int printed = 0; 2284 struct thread *thread; 2285 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2286 int augmented_args_size = 0; 2287 void *augmented_args = NULL; 2288 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2289 struct thread_trace *ttrace; 2290 2291 if (sc == NULL) 2292 return -1; 2293 2294 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2295 ttrace = thread__trace(thread, trace->output); 2296 if (ttrace == NULL) 2297 goto out_put; 2298 2299 trace__fprintf_sample(trace, evsel, sample, thread); 2300 2301 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2302 2303 if (ttrace->entry_str == NULL) { 2304 ttrace->entry_str = malloc(trace__entry_str_size); 2305 if (!ttrace->entry_str) 2306 goto out_put; 2307 } 2308 2309 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) 2310 trace__printf_interrupted_entry(trace); 2311 /* 2312 * If this is raw_syscalls.sys_enter, then it always comes with all 6 possible 2313 * arguments, even if the syscall being handled, say "openat", uses only 4. 2314 * That breaks the syscall__augmented_args() check for augmented args, as we 2315 * calculate syscall->args_size using each syscalls:sys_enter_NAME tracefs format file, 2316 * so when handling, say, the openat syscall, we end up getting 6 args for the 2317 * raw_syscalls:sys_enter event, when we expected just 4, and end up mistakenly 2318 * thinking that the extra 2 u64 args are the augmented filename. So just check 2319 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
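 * A rough sketch of the payloads involved, assuming the usual 64-bit layout: raw_syscalls:sys_enter carries { common fields }{ long id }{ unsigned long args[6] }{ augmentation ... }, while syscalls:sys_enter_openat carries just its own 4 args, which is what sc->args_size measures.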
2320 */ 2321 if (evsel != trace->syscalls.events.sys_enter) 2322 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2323 ttrace->entry_time = sample->time; 2324 msg = ttrace->entry_str; 2325 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); 2326 2327 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, 2328 args, augmented_args, augmented_args_size, trace, thread); 2329 2330 if (sc->is_exit) { 2331 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { 2332 int alignment = 0; 2333 2334 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); 2335 printed = fprintf(trace->output, "%s)", ttrace->entry_str); 2336 if (trace->args_alignment > printed) 2337 alignment = trace->args_alignment - printed; 2338 fprintf(trace->output, "%*s= ?\n", alignment, " "); 2339 } 2340 } else { 2341 ttrace->entry_pending = true; 2342 /* See trace__vfs_getname & trace__sys_exit */ 2343 ttrace->filename.pending_open = false; 2344 } 2345 2346 if (trace->current != thread) { 2347 thread__put(trace->current); 2348 trace->current = thread__get(thread); 2349 } 2350 err = 0; 2351 out_put: 2352 thread__put(thread); 2353 return err; 2354 } 2355 2356 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, 2357 struct perf_sample *sample) 2358 { 2359 struct thread_trace *ttrace; 2360 struct thread *thread; 2361 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2362 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2363 char msg[1024]; 2364 void *args, *augmented_args = NULL; 2365 int augmented_args_size; 2366 2367 if (sc == NULL) 2368 return -1; 2369 2370 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2371 ttrace = thread__trace(thread, trace->output); 2372 /* 2373 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args() 2374 * and the rest of the beautifiers access it via struct syscall_arg. 2375 */ 2376 if (ttrace == NULL) 2377 goto out_put; 2378 2379 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2380 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2381 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); 2382 fprintf(trace->output, "%s", msg); 2383 err = 0; 2384 out_put: 2385 thread__put(thread); 2386 return err; 2387 } 2388 2389 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, 2390 struct perf_sample *sample, 2391 struct callchain_cursor *cursor) 2392 { 2393 struct addr_location al; 2394 int max_stack = evsel->core.attr.sample_max_stack ?
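/* a per-event sample_max_stack, when set, takes precedence over the global --max-stack */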
2395 evsel->core.attr.sample_max_stack : 2396 trace->max_stack; 2397 int err; 2398 2399 if (machine__resolve(trace->host, &al, sample) < 0) 2400 return -1; 2401 2402 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2403 addr_location__put(&al); 2404 return err; 2405 } 2406 2407 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2408 { 2409 /* TODO: user-configurable print_opts */ 2410 const unsigned int print_opts = EVSEL__PRINT_SYM | 2411 EVSEL__PRINT_DSO | 2412 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2413 2414 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output); 2415 } 2416 2417 static const char *errno_to_name(struct evsel *evsel, int err) 2418 { 2419 struct perf_env *env = evsel__env(evsel); 2420 const char *arch_name = perf_env__arch(env); 2421 2422 return arch_syscalls__strerrno(arch_name, err); 2423 } 2424 2425 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2426 union perf_event *event __maybe_unused, 2427 struct perf_sample *sample) 2428 { 2429 long ret; 2430 u64 duration = 0; 2431 bool duration_calculated = false; 2432 struct thread *thread; 2433 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2434 int alignment = trace->args_alignment; 2435 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2436 struct thread_trace *ttrace; 2437 2438 if (sc == NULL) 2439 return -1; 2440 2441 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2442 ttrace = thread__trace(thread, trace->output); 2443 if (ttrace == NULL) 2444 goto out_put; 2445 2446 trace__fprintf_sample(trace, evsel, sample, thread); 2447 2448 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2449 2450 if (trace->summary) 2451 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); 2452 2453 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2454 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2455 ttrace->filename.pending_open = false; 2456 ++trace->stats.vfs_getname; 2457 } 2458 2459 if (ttrace->entry_time) { 2460 duration = sample->time - ttrace->entry_time; 2461 if (trace__filter_duration(trace, duration)) 2462 goto out; 2463 duration_calculated = true; 2464 } else if (trace->duration_filter) 2465 goto out; 2466 2467 if (sample->callchain) { 2468 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2469 if (callchain_ret == 0) { 2470 if (callchain_cursor.nr < trace->min_stack) 2471 goto out; 2472 callchain_ret = 1; 2473 } 2474 } 2475 2476 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2477 goto out; 2478 2479 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2480 2481 if (ttrace->entry_pending) { 2482 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2483 } else { 2484 printed += fprintf(trace->output, " ... 
["); 2485 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2486 printed += 9; 2487 printed += fprintf(trace->output, "]: %s()", sc->name); 2488 } 2489 2490 printed++; /* the closing ')' */ 2491 2492 if (alignment > printed) 2493 alignment -= printed; 2494 else 2495 alignment = 0; 2496 2497 fprintf(trace->output, ")%*s= ", alignment, " "); 2498 2499 if (sc->fmt == NULL) { 2500 if (ret < 0) 2501 goto errno_print; 2502 signed_print: 2503 fprintf(trace->output, "%ld", ret); 2504 } else if (ret < 0) { 2505 errno_print: { 2506 char bf[STRERR_BUFSIZE]; 2507 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2508 *e = errno_to_name(evsel, -ret); 2509 2510 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2511 } 2512 } else if (ret == 0 && sc->fmt->timeout) 2513 fprintf(trace->output, "0 (Timeout)"); 2514 else if (ttrace->ret_scnprintf) { 2515 char bf[1024]; 2516 struct syscall_arg arg = { 2517 .val = ret, 2518 .thread = thread, 2519 .trace = trace, 2520 }; 2521 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2522 ttrace->ret_scnprintf = NULL; 2523 fprintf(trace->output, "%s", bf); 2524 } else if (sc->fmt->hexret) 2525 fprintf(trace->output, "%#lx", ret); 2526 else if (sc->fmt->errpid) { 2527 struct thread *child = machine__find_thread(trace->host, ret, ret); 2528 2529 if (child != NULL) { 2530 fprintf(trace->output, "%ld", ret); 2531 if (child->comm_set) 2532 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2533 thread__put(child); 2534 } 2535 } else 2536 goto signed_print; 2537 2538 fputc('\n', trace->output); 2539 2540 /* 2541 * For the sake of --max-events, only a non-filtered sys_enter + sys_exit 2542 * pair and other tracepoint events count as one 'event'. 2543 */ 2544 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2545 interrupted = true; 2546 2547 if (callchain_ret > 0) 2548 trace__fprintf_callchain(trace, sample); 2549 else if (callchain_ret < 0) 2550 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2551 out: 2552 ttrace->entry_pending = false; 2553 err = 0; 2554 out_put: 2555 thread__put(thread); 2556 return err; 2557 } 2558 2559 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2560 union perf_event *event __maybe_unused, 2561 struct perf_sample *sample) 2562 { 2563 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2564 struct thread_trace *ttrace; 2565 size_t filename_len, entry_str_len, to_move; 2566 ssize_t remaining_space; 2567 char *pos; 2568 const char *filename = evsel__rawptr(evsel, sample, "pathname"); 2569 2570 if (!thread) 2571 goto out; 2572 2573 ttrace = thread__priv(thread); 2574 if (!ttrace) 2575 goto out_put; 2576 2577 filename_len = strlen(filename); 2578 if (filename_len == 0) 2579 goto out_put; 2580 2581 if (ttrace->filename.namelen < filename_len) { 2582 char *f = realloc(ttrace->filename.name, filename_len + 1); 2583 2584 if (f == NULL) 2585 goto out_put; 2586 2587 ttrace->filename.namelen = filename_len; 2588 ttrace->filename.name = f; 2589 } 2590 2591 strcpy(ttrace->filename.name, filename); 2592 ttrace->filename.pending_open = true; 2593 2594 if (!ttrace->filename.ptr) 2595 goto out_put; 2596 2597 entry_str_len = strlen(ttrace->entry_str); 2598 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2599 if (remaining_space <= 0) 2600 goto out_put; 2601 2602 if (filename_len > (size_t)remaining_space) { 2603 filename += filename_len - remaining_space; 2604 filename_len = remaining_space; 2605 } 2606 2607
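/* Splice the filename into the still-pending entry string at the spot recorded earlier in filename.entry_str_pos: shift the tail right by filename_len, then copy the name over the gap. */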
to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 2608 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 2609 memmove(pos + filename_len, pos, to_move); 2610 memcpy(pos, filename, filename_len); 2611 2612 ttrace->filename.ptr = 0; 2613 ttrace->filename.entry_str_pos = 0; 2614 out_put: 2615 thread__put(thread); 2616 out: 2617 return 0; 2618 } 2619 2620 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 2621 union perf_event *event __maybe_unused, 2622 struct perf_sample *sample) 2623 { 2624 u64 runtime = evsel__intval(evsel, sample, "runtime"); 2625 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 2626 struct thread *thread = machine__findnew_thread(trace->host, 2627 sample->pid, 2628 sample->tid); 2629 struct thread_trace *ttrace = thread__trace(thread, trace->output); 2630 2631 if (ttrace == NULL) 2632 goto out_dump; 2633 2634 ttrace->runtime_ms += runtime_ms; 2635 trace->runtime_ms += runtime_ms; 2636 out_put: 2637 thread__put(thread); 2638 return 0; 2639 2640 out_dump: 2641 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n", 2642 evsel->name, 2643 evsel__strval(evsel, sample, "comm"), 2644 (pid_t)evsel__intval(evsel, sample, "pid"), 2645 runtime, 2646 evsel__intval(evsel, sample, "vruntime")); 2647 goto out_put; 2648 } 2649 2650 static int bpf_output__printer(enum binary_printer_ops op, 2651 unsigned int val, void *extra __maybe_unused, FILE *fp) 2652 { 2653 unsigned char ch = (unsigned char)val; 2654 2655 switch (op) { 2656 case BINARY_PRINT_CHAR_DATA: 2657 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 2658 case BINARY_PRINT_DATA_BEGIN: 2659 case BINARY_PRINT_LINE_BEGIN: 2660 case BINARY_PRINT_ADDR: 2661 case BINARY_PRINT_NUM_DATA: 2662 case BINARY_PRINT_NUM_PAD: 2663 case BINARY_PRINT_SEP: 2664 case BINARY_PRINT_CHAR_PAD: 2665 case BINARY_PRINT_LINE_END: 2666 case BINARY_PRINT_DATA_END: 2667 default: 2668 break; 2669 } 2670 2671 return 0; 2672 } 2673 2674 static void bpf_output__fprintf(struct trace *trace, 2675 struct perf_sample *sample) 2676 { 2677 binary__fprintf(sample->raw_data, sample->raw_size, 8, 2678 bpf_output__printer, NULL, trace->output); 2679 ++trace->nr_events_printed; 2680 } 2681 2682 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 2683 struct thread *thread, void *augmented_args, int augmented_args_size) 2684 { 2685 char bf[2048]; 2686 size_t size = sizeof(bf); 2687 struct tep_format_field *field = evsel->tp_format->format.fields; 2688 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 2689 size_t printed = 0; 2690 unsigned long val; 2691 u8 bit = 1; 2692 struct syscall_arg syscall_arg = { 2693 .augmented = { 2694 .size = augmented_args_size, 2695 .args = augmented_args, 2696 }, 2697 .idx = 0, 2698 .mask = 0, 2699 .trace = trace, 2700 .thread = thread, 2701 .show_string_prefix = trace->show_string_prefix, 2702 }; 2703 2704 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 2705 if (syscall_arg.mask & bit) 2706 continue; 2707 2708 syscall_arg.len = 0; 2709 syscall_arg.fmt = arg; 2710 if (field->flags & TEP_FIELD_IS_ARRAY) { 2711 int offset = field->offset; 2712 2713 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2714 offset = format_field__intval(field, sample, evsel->needs_swap); 2715 syscall_arg.len = offset >> 16; 2716 offset &= 0xffff; 2717 } 2718 2719 val = (uintptr_t)(sample->raw_data + offset); 2720 } else 2721 val = format_field__intval(field, sample,
evsel->needs_swap); 2722 /* 2723 * Some syscall args need masking, most don't and 2724 * return val untouched. 2725 */ 2726 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 2727 2728 /* 2729 * Suppress this argument if its value is zero and 2730 * we don't have a string associated in a 2731 * strarray for it. 2732 */ 2733 if (val == 0 && 2734 !trace->show_zeros && 2735 !((arg->show_zero || 2736 arg->scnprintf == SCA_STRARRAY || 2737 arg->scnprintf == SCA_STRARRAYS) && 2738 arg->parm)) 2739 continue; 2740 2741 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2742 2743 /* 2744 * XXX Perhaps we should have a show_tp_arg_names, 2745 * leaving show_arg_names just for syscalls? 2746 */ 2747 if (1 || trace->show_arg_names) 2748 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2749 2750 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 2751 } 2752 2753 return printed + fprintf(trace->output, "%s", bf); 2754 } 2755 2756 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 2757 union perf_event *event __maybe_unused, 2758 struct perf_sample *sample) 2759 { 2760 struct thread *thread; 2761 int callchain_ret = 0; 2762 /* 2763 * Check if we called perf_evsel__disable(evsel) due to, for instance, 2764 * this event's max_events having been hit and this is an entry coming 2765 * from the ring buffer that we should discard, since the max events 2766 * have already been considered/printed. 2767 */ 2768 if (evsel->disabled) 2769 return 0; 2770 2771 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2772 2773 if (sample->callchain) { 2774 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2775 if (callchain_ret == 0) { 2776 if (callchain_cursor.nr < trace->min_stack) 2777 goto out; 2778 callchain_ret = 1; 2779 } 2780 } 2781 2782 trace__printf_interrupted_entry(trace); 2783 trace__fprintf_tstamp(trace, sample->time, trace->output); 2784 2785 if (trace->trace_syscalls && trace->show_duration) 2786 fprintf(trace->output, "( ): "); 2787 2788 if (thread) 2789 trace__fprintf_comm_tid(trace, thread, trace->output); 2790 2791 if (evsel == trace->syscalls.events.augmented) { 2792 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 2793 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2794 2795 if (sc) { 2796 fprintf(trace->output, "%s(", sc->name); 2797 trace__fprintf_sys_enter(trace, evsel, sample); 2798 fputc(')', trace->output); 2799 goto newline; 2800 } 2801 2802 /* 2803 * XXX: Not having the associated syscall info or not finding/adding 2804 * the thread should never happen, but if it does... 2805 * fall thru and print it as a bpf_output event.
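 * (in that case the raw payload just gets hexdumped via bpf_output__fprintf() below).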
2806 */ 2807 } 2808 2809 fprintf(trace->output, "%s(", evsel->name); 2810 2811 if (evsel__is_bpf_output(evsel)) { 2812 bpf_output__fprintf(trace, sample); 2813 } else if (evsel->tp_format) { 2814 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) || 2815 trace__fprintf_sys_enter(trace, evsel, sample)) { 2816 if (trace->libtraceevent_print) { 2817 event_format__fprintf(evsel->tp_format, sample->cpu, 2818 sample->raw_data, sample->raw_size, 2819 trace->output); 2820 } else { 2821 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 2822 } 2823 } 2824 } 2825 2826 newline: 2827 fprintf(trace->output, ")\n"); 2828 2829 if (callchain_ret > 0) 2830 trace__fprintf_callchain(trace, sample); 2831 else if (callchain_ret < 0) 2832 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2833 2834 ++trace->nr_events_printed; 2835 2836 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 2837 evsel__disable(evsel); 2838 evsel__close(evsel); 2839 } 2840 out: 2841 thread__put(thread); 2842 return 0; 2843 } 2844 2845 static void print_location(FILE *f, struct perf_sample *sample, 2846 struct addr_location *al, 2847 bool print_dso, bool print_sym) 2848 { 2849 2850 if ((verbose > 0 || print_dso) && al->map) 2851 fprintf(f, "%s@", al->map->dso->long_name); 2852 2853 if ((verbose > 0 || print_sym) && al->sym) 2854 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 2855 al->addr - al->sym->start); 2856 else if (al->map) 2857 fprintf(f, "0x%" PRIx64, al->addr); 2858 else 2859 fprintf(f, "0x%" PRIx64, sample->addr); 2860 } 2861 2862 static int trace__pgfault(struct trace *trace, 2863 struct evsel *evsel, 2864 union perf_event *event __maybe_unused, 2865 struct perf_sample *sample) 2866 { 2867 struct thread *thread; 2868 struct addr_location al; 2869 char map_type = 'd'; 2870 struct thread_trace *ttrace; 2871 int err = -1; 2872 int callchain_ret = 0; 2873 2874 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2875 2876 if (sample->callchain) { 2877 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2878 if (callchain_ret == 0) { 2879 if (callchain_cursor.nr < trace->min_stack) 2880 goto out_put; 2881 callchain_ret = 1; 2882 } 2883 } 2884 2885 ttrace = thread__trace(thread, trace->output); 2886 if (ttrace == NULL) 2887 goto out_put; 2888 2889 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) 2890 ttrace->pfmaj++; 2891 else 2892 ttrace->pfmin++; 2893 2894 if (trace->summary_only) 2895 goto out; 2896 2897 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 2898 2899 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 2900 2901 fprintf(trace->output, "%sfault [", 2902 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
2903 "maj" : "min"); 2904 2905 print_location(trace->output, sample, &al, false, true); 2906 2907 fprintf(trace->output, "] => "); 2908 2909 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2910 2911 if (!al.map) { 2912 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2913 2914 if (al.map) 2915 map_type = 'x'; 2916 else 2917 map_type = '?'; 2918 } 2919 2920 print_location(trace->output, sample, &al, true, false); 2921 2922 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 2923 2924 if (callchain_ret > 0) 2925 trace__fprintf_callchain(trace, sample); 2926 else if (callchain_ret < 0) 2927 pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel)); 2928 2929 ++trace->nr_events_printed; 2930 out: 2931 err = 0; 2932 out_put: 2933 thread__put(thread); 2934 return err; 2935 } 2936 2937 static void trace__set_base_time(struct trace *trace, 2938 struct evsel *evsel, 2939 struct perf_sample *sample) 2940 { 2941 /* 2942 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 2943 * and don't use sample->time unconditionally, we may end up having 2944 * some other event in the future without PERF_SAMPLE_TIME for good 2945 * reason, i.e. we may not be interested in its timestamps, just in 2946 * it taking place, picking some piece of information when it 2947 * appears in our event stream (vfs_getname comes to mind). 2948 */ 2949 if (trace->base_time == 0 && !trace->full_time && 2950 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 2951 trace->base_time = sample->time; 2952 } 2953 2954 static int trace__process_sample(struct perf_tool *tool, 2955 union perf_event *event, 2956 struct perf_sample *sample, 2957 struct evsel *evsel, 2958 struct machine *machine __maybe_unused) 2959 { 2960 struct trace *trace = container_of(tool, struct trace, tool); 2961 struct thread *thread; 2962 int err = 0; 2963 2964 tracepoint_handler handler = evsel->handler; 2965 2966 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2967 if (thread && thread__is_filtered(thread)) 2968 goto out; 2969 2970 trace__set_base_time(trace, evsel, sample); 2971 2972 if (handler) { 2973 ++trace->nr_events; 2974 handler(trace, evsel, event, sample); 2975 } 2976 out: 2977 thread__put(thread); 2978 return err; 2979 } 2980 2981 static int trace__record(struct trace *trace, int argc, const char **argv) 2982 { 2983 unsigned int rec_argc, i, j; 2984 const char **rec_argv; 2985 const char * const record_args[] = { 2986 "record", 2987 "-R", 2988 "-m", "1024", 2989 "-c", "1", 2990 }; 2991 pid_t pid = getpid(); 2992 char *filter = asprintf__tp_filter_pids(1, &pid); 2993 const char * const sc_args[] = { "-e", }; 2994 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 2995 const char * const majpf_args[] = { "-e", "major-faults" }; 2996 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 2997 const char * const minpf_args[] = { "-e", "minor-faults" }; 2998 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 2999 int err = -1; 3000 3001 /* +3 is for the event string below and the pid filter */ 3002 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 3003 majpf_args_nr + minpf_args_nr + argc; 3004 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3005 3006 if (rec_argv == NULL || filter == NULL) 3007 goto out_free; 3008 3009 j = 0; 3010 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3011 rec_argv[j++] = record_args[i]; 3012 3013 if (trace->trace_syscalls) { 3014 for (i = 0; i < sc_args_nr; i++) 3015 rec_argv[j++] = sc_args[i]; 3016 3017 /* event string may be different 
for older kernels - e.g., RHEL6 */ 3018 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 3019 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 3020 else if (is_valid_tracepoint("syscalls:sys_enter")) 3021 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 3022 else { 3023 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 3024 goto out_free; 3025 } 3026 } 3027 3028 rec_argv[j++] = "--filter"; 3029 rec_argv[j++] = filter; 3030 3031 if (trace->trace_pgfaults & TRACE_PFMAJ) 3032 for (i = 0; i < majpf_args_nr; i++) 3033 rec_argv[j++] = majpf_args[i]; 3034 3035 if (trace->trace_pgfaults & TRACE_PFMIN) 3036 for (i = 0; i < minpf_args_nr; i++) 3037 rec_argv[j++] = minpf_args[i]; 3038 3039 for (i = 0; i < (unsigned int)argc; i++) 3040 rec_argv[j++] = argv[i]; 3041 3042 err = cmd_record(j, rec_argv); 3043 out_free: 3044 free(filter); 3045 free(rec_argv); 3046 return err; 3047 } 3048 3049 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3050 3051 static bool evlist__add_vfs_getname(struct evlist *evlist) 3052 { 3053 bool found = false; 3054 struct evsel *evsel, *tmp; 3055 struct parse_events_error err; 3056 int ret; 3057 3058 bzero(&err, sizeof(err)); 3059 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3060 if (ret) { 3061 free(err.str); 3062 free(err.help); 3063 free(err.first_str); 3064 free(err.first_help); 3065 return false; 3066 } 3067 3068 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3069 if (!strstarts(evsel__name(evsel), "probe:vfs_getname")) 3070 continue; 3071 3072 if (evsel__field(evsel, "pathname")) { 3073 evsel->handler = trace__vfs_getname; 3074 found = true; 3075 continue; 3076 } 3077 3078 list_del_init(&evsel->core.node); 3079 evsel->evlist = NULL; 3080 evsel__delete(evsel); 3081 } 3082 3083 return found; 3084 } 3085 3086 static struct evsel *evsel__new_pgfault(u64 config) 3087 { 3088 struct evsel *evsel; 3089 struct perf_event_attr attr = { 3090 .type = PERF_TYPE_SOFTWARE, 3091 .mmap_data = 1, 3092 }; 3093 3094 attr.config = config; 3095 attr.sample_period = 1; 3096 3097 event_attr_init(&attr); 3098 3099 evsel = evsel__new(&attr); 3100 if (evsel) 3101 evsel->handler = trace__pgfault; 3102 3103 return evsel; 3104 } 3105 3106 static void evlist__free_syscall_tp_fields(struct evlist *evlist) 3107 { 3108 struct evsel *evsel; 3109 3110 evlist__for_each_entry(evlist, evsel) { 3111 struct evsel_trace *et = evsel->priv; 3112 3113 if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls")) 3114 continue; 3115 3116 free(et->fmt); 3117 free(et); 3118 } 3119 } 3120 3121 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3122 { 3123 const u32 type = event->header.type; 3124 struct evsel *evsel; 3125 3126 if (type != PERF_RECORD_SAMPLE) { 3127 trace__process_event(trace, trace->host, event, sample); 3128 return; 3129 } 3130 3131 evsel = evlist__id2evsel(trace->evlist, sample->id); 3132 if (evsel == NULL) { 3133 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3134 return; 3135 } 3136 3137 if (evswitch__discard(&trace->evswitch, evsel)) 3138 return; 3139 3140 trace__set_base_time(trace, evsel, sample); 3141 3142 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3143 sample->raw_data == NULL) { 3144 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3145 evsel__name(evsel), sample->tid, 3146 sample->cpu, sample->raw_size); 3147 } else { 3148 tracepoint_handler handler = 
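/* set when the evsel was created, e.g. trace__sys_enter, trace__sys_exit, trace__vfs_getname or trace__pgfault */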
evsel->handler; 3149 handler(trace, evsel, event, sample); 3150 } 3151 3152 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) 3153 interrupted = true; 3154 } 3155 3156 static int trace__add_syscall_newtp(struct trace *trace) 3157 { 3158 int ret = -1; 3159 struct evlist *evlist = trace->evlist; 3160 struct evsel *sys_enter, *sys_exit; 3161 3162 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3163 if (sys_enter == NULL) 3164 goto out; 3165 3166 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3167 goto out_delete_sys_enter; 3168 3169 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3170 if (sys_exit == NULL) 3171 goto out_delete_sys_enter; 3172 3173 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3174 goto out_delete_sys_exit; 3175 3176 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3177 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3178 3179 evlist__add(evlist, sys_enter); 3180 evlist__add(evlist, sys_exit); 3181 3182 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3183 /* 3184 * We're interested only in the user space callchain 3185 * leading to the syscall, allow overriding that for 3186 * debugging reasons using --kernel-syscall-graph 3187 */ 3188 sys_exit->core.attr.exclude_callchain_kernel = 1; 3189 } 3190 3191 trace->syscalls.events.sys_enter = sys_enter; 3192 trace->syscalls.events.sys_exit = sys_exit; 3193 3194 ret = 0; 3195 out: 3196 return ret; 3197 3198 out_delete_sys_exit: 3199 evsel__delete_priv(sys_exit); 3200 out_delete_sys_enter: 3201 evsel__delete_priv(sys_enter); 3202 goto out; 3203 } 3204 3205 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3206 { 3207 int err = -1; 3208 struct evsel *sys_exit; 3209 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3210 trace->ev_qualifier_ids.nr, 3211 trace->ev_qualifier_ids.entries); 3212 3213 if (filter == NULL) 3214 goto out_enomem; 3215 3216 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) { 3217 sys_exit = trace->syscalls.events.sys_exit; 3218 err = evsel__append_tp_filter(sys_exit, filter); 3219 } 3220 3221 free(filter); 3222 out: 3223 return err; 3224 out_enomem: 3225 errno = ENOMEM; 3226 goto out; 3227 } 3228 3229 #ifdef HAVE_LIBBPF_SUPPORT 3230 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name) 3231 { 3232 if (trace->bpf_obj == NULL) 3233 return NULL; 3234 3235 return bpf_object__find_map_by_name(trace->bpf_obj, name); 3236 } 3237 3238 static void trace__set_bpf_map_filtered_pids(struct trace *trace) 3239 { 3240 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered"); 3241 } 3242 3243 static void trace__set_bpf_map_syscalls(struct trace *trace) 3244 { 3245 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls"); 3246 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter"); 3247 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit"); 3248 } 3249 3250 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3251 { 3252 if (trace->bpf_obj == NULL) 3253 return NULL; 3254 3255 return bpf_object__find_program_by_title(trace->bpf_obj, name); 3256 } 3257 3258 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3259 const char *prog_name, const char *type) 3260 { 3261 struct bpf_program
*prog; 3262 3263 if (prog_name == NULL) { 3264 char default_prog_name[256]; 3265 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name); 3266 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3267 if (prog != NULL) 3268 goto out_found; 3269 if (sc->fmt && sc->fmt->alias) { 3270 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias); 3271 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3272 if (prog != NULL) 3273 goto out_found; 3274 } 3275 goto out_unaugmented; 3276 } 3277 3278 prog = trace__find_bpf_program_by_title(trace, prog_name); 3279 3280 if (prog != NULL) { 3281 out_found: 3282 return prog; 3283 } 3284 3285 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3286 prog_name, type, sc->name); 3287 out_unaugmented: 3288 return trace->syscalls.unaugmented_prog; 3289 } 3290 3291 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) 3292 { 3293 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3294 3295 if (sc == NULL) 3296 return; 3297 3298 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3299 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3300 } 3301 3302 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) 3303 { 3304 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3305 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3306 } 3307 3308 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) 3309 { 3310 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3311 return sc ? 
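/* without per-syscall info, fall back to the catch-all unaugmented program */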
bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3312 } 3313 3314 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry) 3315 { 3316 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3317 int arg = 0; 3318 3319 if (sc == NULL) 3320 goto out; 3321 3322 for (; arg < sc->nr_args; ++arg) { 3323 entry->string_args_len[arg] = 0; 3324 if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) { 3325 /* Should be set like strace -s strsize */ 3326 entry->string_args_len[arg] = PATH_MAX; 3327 } 3328 } 3329 out: 3330 for (; arg < 6; ++arg) 3331 entry->string_args_len[arg] = 0; 3332 } 3333 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace) 3334 { 3335 int fd = bpf_map__fd(trace->syscalls.map); 3336 struct bpf_map_syscall_entry value = { 3337 .enabled = !trace->not_ev_qualifier, 3338 }; 3339 int err = 0; 3340 size_t i; 3341 3342 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) { 3343 int key = trace->ev_qualifier_ids.entries[i]; 3344 3345 if (value.enabled) { 3346 trace__init_bpf_map_syscall_args(trace, key, &value); 3347 trace__init_syscall_bpf_progs(trace, key); 3348 } 3349 3350 err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); 3351 if (err) 3352 break; 3353 } 3354 3355 return err; 3356 } 3357 3358 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled) 3359 { 3360 int fd = bpf_map__fd(trace->syscalls.map); 3361 struct bpf_map_syscall_entry value = { 3362 .enabled = enabled, 3363 }; 3364 int err = 0, key; 3365 3366 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3367 if (enabled) 3368 trace__init_bpf_map_syscall_args(trace, key, &value); 3369 3370 err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); 3371 if (err) 3372 break; 3373 } 3374 3375 return err; 3376 } 3377 3378 static int trace__init_syscalls_bpf_map(struct trace *trace) 3379 { 3380 bool enabled = true; 3381 3382 if (trace->ev_qualifier_ids.nr) 3383 enabled = trace->not_ev_qualifier; 3384 3385 return __trace__init_syscalls_bpf_map(trace, enabled); 3386 } 3387 3388 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) 3389 { 3390 struct tep_format_field *field, *candidate_field; 3391 int id; 3392 3393 /* 3394 * We're only interested in syscalls that have a pointer: 3395 */ 3396 for (field = sc->args; field; field = field->next) { 3397 if (field->flags & TEP_FIELD_IS_POINTER) 3398 goto try_to_find_pair; 3399 } 3400 3401 return NULL; 3402 3403 try_to_find_pair: 3404 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) { 3405 struct syscall *pair = trace__syscall_info(trace, NULL, id); 3406 struct bpf_program *pair_prog; 3407 bool is_candidate = false; 3408 3409 if (pair == NULL || pair == sc || 3410 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog) 3411 continue; 3412 3413 for (field = sc->args, candidate_field = pair->args; 3414 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3415 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3416 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3417 3418 if (is_pointer) { 3419 if (!candidate_is_pointer) { 3420 // The candidate just doesn't copy our pointer arg, might copy other pointers we want. 3421 continue; 3422 } 3423 } else { 3424 if (candidate_is_pointer) { 3425 // The candidate might copy a pointer we don't have, skip it.
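// (reusing it would copy payload for an arg that isn't a pointer in our syscall)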
3426 goto next_candidate; 3427 } 3428 continue; 3429 } 3430 3431 if (strcmp(field->type, candidate_field->type)) 3432 goto next_candidate; 3433 3434 is_candidate = true; 3435 } 3436 3437 if (!is_candidate) 3438 goto next_candidate; 3439 3440 /* 3441 * Check if the tentative pair syscall augmenter has more pointers: if it has, 3442 * then it may be collecting those and we then can't use it, as it would collect 3443 * more than what is common to the two syscalls. 3444 */ 3445 if (candidate_field) { 3446 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3447 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3448 goto next_candidate; 3449 } 3450 3451 pair_prog = pair->bpf_prog.sys_enter; 3452 /* 3453 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3454 * have been searched for, so search it here and if it returns the 3455 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3456 * program for a filtered syscall on a non-filtered one. 3457 * 3458 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3459 * useful for "renameat2". 3460 */ 3461 if (pair_prog == NULL) { 3462 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3463 if (pair_prog == trace->syscalls.unaugmented_prog) 3464 goto next_candidate; 3465 } 3466 3467 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); 3468 return pair_prog; 3469 next_candidate: 3470 continue; 3471 } 3472 3473 return NULL; 3474 } 3475 3476 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) 3477 { 3478 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter), 3479 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit); 3480 int err = 0, key; 3481 3482 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3483 int prog_fd; 3484 3485 if (!trace__syscall_enabled(trace, key)) 3486 continue; 3487 3488 trace__init_syscall_bpf_progs(trace, key); 3489 3490 // It'll get at least the "!raw_syscalls:unaugmented" program 3491 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); 3492 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3493 if (err) 3494 break; 3495 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); 3496 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 3497 if (err) 3498 break; 3499 } 3500 3501 /* 3502 * Now let's do a second pass looking for enabled syscalls without 3503 * an augmenter that have a signature that is a superset of another 3504 * syscall with an augmenter so that we can auto-reuse it. 3505 * 3506 * I.e. if we have an augmenter for the "open" syscall that has 3507 * this signature: 3508 * 3509 * int open(const char *pathname, int flags, mode_t mode); 3510 * 3511 * i.e. one that collects just the first string argument, then we 3512 * can reuse it for the 'creat' syscall, that has this signature: 3513 * 3514 * int creat(const char *pathname, mode_t mode); 3515 * 3516 * and for: 3517 * 3518 * int stat(const char *pathname, struct stat *statbuf); 3519 * int lstat(const char *pathname, struct stat *statbuf); 3520 * 3521 * Because the 'open' augmenter will collect the first arg as a string, 3522 * and leave alone all the other args, which already helps with 3523 * beautifying 'stat' and 'lstat''s pathname arg.
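 * (That matching is what trace__find_usable_bpf_prog_entry() above implements: arg types must agree position by position, and the candidate must not collect pointers we don't have.)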
* 3525 * Then, in time, when 'stat' gets an augmenter that collects both 3526 * first and second arg (this one on the raw_syscalls:sys_exit prog 3527 * array tail call), then that one will be used. 3528 */ 3529 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3530 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3531 struct bpf_program *pair_prog; 3532 int prog_fd; 3533 3534 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3535 continue; 3536 3537 /* 3538 * For now we're just reusing the sys_enter prog, and if it 3539 * already has an augmenter, we don't need to find one. 3540 */ 3541 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog) 3542 continue; 3543 3544 /* 3545 * Look at all the other syscalls for one that has a signature 3546 * that is close enough that we can share: 3547 */ 3548 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3549 if (pair_prog == NULL) 3550 continue; 3551 3552 sc->bpf_prog.sys_enter = pair_prog; 3553 3554 /* 3555 * Update the BPF_MAP_TYPE_PROG_ARRAY for raw_syscalls:sys_enter 3556 * with the fd for the program we're reusing: 3557 */ 3558 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3559 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3560 if (err) 3561 break; 3562 } 3563 3564 3565 return err; 3566 } 3567 3568 static void trace__delete_augmented_syscalls(struct trace *trace) 3569 { 3570 struct evsel *evsel, *tmp; 3571 3572 evlist__remove(trace->evlist, trace->syscalls.events.augmented); 3573 evsel__delete(trace->syscalls.events.augmented); 3574 trace->syscalls.events.augmented = NULL; 3575 3576 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) { 3577 if (evsel->bpf_obj == trace->bpf_obj) { 3578 evlist__remove(trace->evlist, evsel); 3579 evsel__delete(evsel); 3580 } 3581 3582 } 3583 3584 bpf_object__close(trace->bpf_obj); 3585 trace->bpf_obj = NULL; 3586 } 3587 #else // HAVE_LIBBPF_SUPPORT 3588 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused, 3589 const char *name __maybe_unused) 3590 { 3591 return NULL; 3592 } 3593 3594 static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused) 3595 { 3596 } 3597 3598 static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused) 3599 { 3600 } 3601 3602 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused) 3603 { 3604 return 0; 3605 } 3606 3607 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused) 3608 { 3609 return 0; 3610 } 3611 3612 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused, 3613 const char *name __maybe_unused) 3614 { 3615 return NULL; 3616 } 3617 3618 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused) 3619 { 3620 return 0; 3621 } 3622 3623 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused) 3624 { 3625 } 3626 #endif // HAVE_LIBBPF_SUPPORT 3627 3628 static bool trace__only_augmented_syscalls_evsels(struct trace *trace) 3629 { 3630 struct evsel *evsel; 3631 3632 evlist__for_each_entry(trace->evlist, evsel) { 3633 if (evsel == trace->syscalls.events.augmented || 3634 evsel->bpf_obj == trace->bpf_obj) 3635 continue; 3636 3637 return false; 3638 } 3639 3640 return true; 3641 } 3642 3643 static int trace__set_ev_qualifier_filter(struct trace *trace) 3644 { 3645 if (trace->syscalls.map) 3646 return trace__set_ev_qualifier_bpf_filter(trace); 3647 if (trace->syscalls.events.sys_enter) 3648 return
trace__set_ev_qualifier_tp_filter(trace); 3649 return 0; 3650 } 3651 3652 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3653 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3654 { 3655 int err = 0; 3656 #ifdef HAVE_LIBBPF_SUPPORT 3657 bool value = true; 3658 int map_fd = bpf_map__fd(map); 3659 size_t i; 3660 3661 for (i = 0; i < npids; ++i) { 3662 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 3663 if (err) 3664 break; 3665 } 3666 #endif 3667 return err; 3668 } 3669 3670 static int trace__set_filter_loop_pids(struct trace *trace) 3671 { 3672 unsigned int nr = 1, err; 3673 pid_t pids[32] = { 3674 getpid(), 3675 }; 3676 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 3677 3678 while (thread && nr < ARRAY_SIZE(pids)) { 3679 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid); 3680 3681 if (parent == NULL) 3682 break; 3683 3684 if (!strcmp(thread__comm_str(parent), "sshd") || 3685 strstarts(thread__comm_str(parent), "gnome-terminal")) { 3686 pids[nr++] = parent->tid; 3687 break; 3688 } 3689 thread = parent; 3690 } 3691 3692 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids); 3693 if (!err && trace->filter_pids.map) 3694 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 3695 3696 return err; 3697 } 3698 3699 static int trace__set_filter_pids(struct trace *trace) 3700 { 3701 int err = 0; 3702 /* 3703 * Better not use !target__has_task() here because we need to cover the 3704 * case where no threads were specified in the command line, but a 3705 * workload was, and in that case we will fill in the thread_map when 3706 * we fork the workload in evlist__prepare_workload. 3707 */ 3708 if (trace->filter_pids.nr > 0) { 3709 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 3710 trace->filter_pids.entries); 3711 if (!err && trace->filter_pids.map) { 3712 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 3713 trace->filter_pids.entries); 3714 } 3715 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 3716 err = trace__set_filter_loop_pids(trace); 3717 } 3718 3719 return err; 3720 } 3721 3722 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 3723 { 3724 struct evlist *evlist = trace->evlist; 3725 struct perf_sample sample; 3726 int err = evlist__parse_sample(evlist, event, &sample); 3727 3728 if (err) 3729 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 3730 else 3731 trace__handle_event(trace, event, &sample); 3732 3733 return 0; 3734 } 3735 3736 static int __trace__flush_events(struct trace *trace) 3737 { 3738 u64 first = ordered_events__first_time(&trace->oe.data); 3739 u64 flush = trace->oe.last - NSEC_PER_SEC; 3740 3741 /* Is there some thing to flush.. */ 3742 if (first && first < flush) 3743 return ordered_events__flush_time(&trace->oe.data, flush); 3744 3745 return 0; 3746 } 3747 3748 static int trace__flush_events(struct trace *trace) 3749 { 3750 return !trace->sort_events ? 
0 : __trace__flush_events(trace); 3751 } 3752 3753 static int trace__deliver_event(struct trace *trace, union perf_event *event) 3754 { 3755 int err; 3756 3757 if (!trace->sort_events) 3758 return __trace__deliver_event(trace, event); 3759 3760 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 3761 if (err && err != -1) 3762 return err; 3763 3764 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0); 3765 if (err) 3766 return err; 3767 3768 return trace__flush_events(trace); 3769 } 3770 3771 static int ordered_events__deliver_event(struct ordered_events *oe, 3772 struct ordered_event *event) 3773 { 3774 struct trace *trace = container_of(oe, struct trace, oe.data); 3775 3776 return __trace__deliver_event(trace, event->event); 3777 } 3778 3779 static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg) 3780 { 3781 struct tep_format_field *field; 3782 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 3783 3784 if (evsel->tp_format == NULL || fmt == NULL) 3785 return NULL; 3786 3787 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt) 3788 if (strcmp(field->name, arg) == 0) 3789 return fmt; 3790 3791 return NULL; 3792 } 3793 3794 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel) 3795 { 3796 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 3797 3798 while ((tok = strpbrk(left, "=<>!")) != NULL) { 3799 char *right = tok + 1, *right_end; 3800 3801 if (*right == '=') 3802 ++right; 3803 3804 while (isspace(*right)) 3805 ++right; 3806 3807 if (*right == '\0') 3808 break; 3809 3810 while (!isalpha(*left)) 3811 if (++left == tok) { 3812 /* 3813 * Bail out, can't find the name of the argument that is being 3814 * used in the filter, let it try to set this filter, will fail later. 
3815 */ 3816 return 0; 3817 } 3818 3819 right_end = right + 1; 3820 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 3821 ++right_end; 3822 3823 if (isalpha(*right)) { 3824 struct syscall_arg_fmt *fmt; 3825 int left_size = tok - left, 3826 right_size = right_end - right; 3827 char arg[128]; 3828 3829 while (isspace(left[left_size - 1])) 3830 --left_size; 3831 3832 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 3833 3834 fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg); 3835 if (fmt == NULL) { 3836 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 3837 arg, evsel->name, evsel->filter); 3838 return -1; 3839 } 3840 3841 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 3842 arg, (int)(right - tok), tok, right_size, right); 3843 3844 if (fmt->strtoul) { 3845 u64 val; 3846 struct syscall_arg syscall_arg = { 3847 .parm = fmt->parm, 3848 }; 3849 3850 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 3851 char *n, expansion[19]; 3852 int expansion_length = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 3853 int expansion_offset = right - new_filter; 3854 3855 pr_debug("%s", expansion); 3856 3857 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 3858 pr_debug(" out of memory!\n"); 3859 free(new_filter); 3860 return -1; 3861 } 3862 if (new_filter != evsel->filter) 3863 free(new_filter); 3864 left = n + expansion_offset + expansion_length; 3865 new_filter = n; 3866 } else { 3867 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3868 right_size, right, arg, evsel->name, evsel->filter); 3869 return -1; 3870 } 3871 } else { 3872 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3873 arg, evsel->name, evsel->filter); 3874 return -1; 3875 } 3876 3877 pr_debug("\n"); 3878 } else { 3879 left = right_end; 3880 } 3881 } 3882 3883 if (new_filter != evsel->filter) { 3884 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 3885 evsel__set_filter(evsel, new_filter); 3886 free(new_filter); 3887 } 3888 3889 return 0; 3890 } 3891 3892 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 3893 { 3894 struct evlist *evlist = trace->evlist; 3895 struct evsel *evsel; 3896 3897 evlist__for_each_entry(evlist, evsel) { 3898 if (evsel->filter == NULL) 3899 continue; 3900 3901 if (trace__expand_filter(trace, evsel)) { 3902 *err_evsel = evsel; 3903 return -1; 3904 } 3905 } 3906 3907 return 0; 3908 } 3909 3910 static int trace__run(struct trace *trace, int argc, const char **argv) 3911 { 3912 struct evlist *evlist = trace->evlist; 3913 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 3914 int err = -1, i; 3915 unsigned long before; 3916 const bool forks = argc > 0; 3917 bool draining = false; 3918 3919 trace->live = true; 3920 3921 if (!trace->raw_augmented_syscalls) { 3922 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 3923 goto out_error_raw_syscalls; 3924 3925 if (trace->trace_syscalls) 3926 trace->vfs_getname = evlist__add_vfs_getname(evlist); 3927 } 3928 3929 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 3930 pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 3931 if (pgfault_maj == NULL) 3932 goto out_error_mem; 3933 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 3934 evlist__add(evlist, pgfault_maj); 3935 } 3936 3937 if ((trace->trace_pgfaults & TRACE_PFMIN)) { 3938 pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 3939 if (pgfault_min
== NULL) 3940 goto out_error_mem; 3941 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 3942 evlist__add(evlist, pgfault_min); 3943 } 3944 3945 if (trace->sched && 3946 evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime)) 3947 goto out_error_sched_stat_runtime; 3948 /* 3949 * If a global cgroup was set, apply it to all the events without an 3950 * explicit cgroup. I.e.: 3951 * 3952 * trace -G A -e sched:*switch 3953 * 3954 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc 3955 * _and_ sched:sched_switch to the 'A' cgroup, while: 3956 * 3957 * trace -e sched:*switch -G A 3958 * 3959 * will only set the sched:sched_switch event to the 'A' cgroup, all the 3960 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without" 3961 * a cgroup (on the root cgroup, sys wide, etc). 3962 * 3963 * Multiple cgroups: 3964 * 3965 * trace -G A -e sched:*switch -G B 3966 * 3967 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 3968 * to the 'B' cgroup. 3969 * 3970 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 3971 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 3972 */ 3973 if (trace->cgroup) 3974 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 3975 3976 err = evlist__create_maps(evlist, &trace->opts.target); 3977 if (err < 0) { 3978 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 3979 goto out_delete_evlist; 3980 } 3981 3982 err = trace__symbols_init(trace, evlist); 3983 if (err < 0) { 3984 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 3985 goto out_delete_evlist; 3986 } 3987 3988 evlist__config(evlist, &trace->opts, &callchain_param); 3989 3990 if (forks) { 3991 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL); 3992 if (err < 0) { 3993 fprintf(trace->output, "Couldn't run the workload!\n"); 3994 goto out_delete_evlist; 3995 } 3996 } 3997 3998 err = evlist__open(evlist); 3999 if (err < 0) 4000 goto out_error_open; 4001 4002 err = bpf__apply_obj_config(); 4003 if (err) { 4004 char errbuf[BUFSIZ]; 4005 4006 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf)); 4007 pr_err("ERROR: Apply config to BPF failed: %s\n", 4008 errbuf); 4009 goto out_error_open; 4010 } 4011 4012 err = trace__set_filter_pids(trace); 4013 if (err < 0) 4014 goto out_error_mem; 4015 4016 if (trace->syscalls.map) 4017 trace__init_syscalls_bpf_map(trace); 4018 4019 if (trace->syscalls.prog_array.sys_enter) 4020 trace__init_syscalls_bpf_prog_array_maps(trace); 4021 4022 if (trace->ev_qualifier_ids.nr > 0) { 4023 err = trace__set_ev_qualifier_filter(trace); 4024 if (err < 0) 4025 goto out_errno; 4026 4027 if (trace->syscalls.events.sys_exit) { 4028 pr_debug("event qualifier tracepoint filter: %s\n", 4029 trace->syscalls.events.sys_exit->filter); 4030 } 4031 } 4032 4033 /* 4034 * If the "close" syscall is not traced, then we will not have the 4035 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the 4036 * fd->pathname table and were ending up showing the last value set by 4037 * syscalls opening a pathname and associating it with a descriptor or 4038 * reading it from /proc/pid/fd/ in cases where that doesn't make 4039 * sense. 4040 * 4041 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 4042 * not in use. 
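 * E.g. in a session like 'perf trace -e openat,read <workload>' the 'close'
 * syscall is never traced, so a given fd number may have been reused since we
 * last associated it with a pathname, and beautifying it could show a stale
 * association.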
4043 */ 4044 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); 4045 4046 err = trace__expand_filters(trace, &evsel); 4047 if (err) 4048 goto out_delete_evlist; 4049 err = evlist__apply_filters(evlist, &evsel); 4050 if (err < 0) 4051 goto out_error_apply_filters; 4052 4053 if (trace->dump.map) 4054 bpf_map__fprintf(trace->dump.map, trace->output); 4055 4056 err = evlist__mmap(evlist, trace->opts.mmap_pages); 4057 if (err < 0) 4058 goto out_error_mmap; 4059 4060 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay) 4061 evlist__enable(evlist); 4062 4063 if (forks) 4064 evlist__start_workload(evlist); 4065 4066 if (trace->opts.initial_delay) { 4067 usleep(trace->opts.initial_delay * 1000); 4068 evlist__enable(evlist); 4069 } 4070 4071 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 4072 evlist->core.threads->nr > 1 || 4073 evlist__first(evlist)->core.attr.inherit; 4074 4075 /* 4076 * Now that we already used evsel->core.attr to ask the kernel to setup the 4077 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in 4078 * trace__resolve_callchain(), allowing per-event max-stack settings 4079 * to override an explicitly set --max-stack global setting. 4080 */ 4081 evlist__for_each_entry(evlist, evsel) { 4082 if (evsel__has_callchain(evsel) && 4083 evsel->core.attr.sample_max_stack == 0) 4084 evsel->core.attr.sample_max_stack = trace->max_stack; 4085 } 4086 again: 4087 before = trace->nr_events; 4088 4089 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4090 union perf_event *event; 4091 struct mmap *md; 4092 4093 md = &evlist->mmap[i]; 4094 if (perf_mmap__read_init(&md->core) < 0) 4095 continue; 4096 4097 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4098 ++trace->nr_events; 4099 4100 err = trace__deliver_event(trace, event); 4101 if (err) 4102 goto out_disable; 4103 4104 perf_mmap__consume(&md->core); 4105 4106 if (interrupted) 4107 goto out_disable; 4108 4109 if (done && !draining) { 4110 evlist__disable(evlist); 4111 draining = true; 4112 } 4113 } 4114 perf_mmap__read_done(&md->core); 4115 } 4116 4117 if (trace->nr_events == before) { 4118 int timeout = done ? 
100 : -1; 4119 4120 if (!draining && evlist__poll(evlist, timeout) > 0) { 4121 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4122 draining = true; 4123 4124 goto again; 4125 } else { 4126 if (trace__flush_events(trace)) 4127 goto out_disable; 4128 } 4129 } else { 4130 goto again; 4131 } 4132 4133 out_disable: 4134 thread__zput(trace->current); 4135 4136 evlist__disable(evlist); 4137 4138 if (trace->sort_events) 4139 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4140 4141 if (!err) { 4142 if (trace->summary) 4143 trace__fprintf_thread_summary(trace, trace->output); 4144 4145 if (trace->show_tool_stats) { 4146 fprintf(trace->output, "Stats:\n " 4147 " vfs_getname : %" PRIu64 "\n" 4148 " proc_getname: %" PRIu64 "\n", 4149 trace->stats.vfs_getname, 4150 trace->stats.proc_getname); 4151 } 4152 } 4153 4154 out_delete_evlist: 4155 trace__symbols__exit(trace); 4156 evlist__free_syscall_tp_fields(evlist); 4157 evlist__delete(evlist); 4158 cgroup__put(trace->cgroup); 4159 trace->evlist = NULL; 4160 trace->live = false; 4161 return err; 4162 { 4163 char errbuf[BUFSIZ]; 4164 4165 out_error_sched_stat_runtime: 4166 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4167 goto out_error; 4168 4169 out_error_raw_syscalls: 4170 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4171 goto out_error; 4172 4173 out_error_mmap: 4174 evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4175 goto out_error; 4176 4177 out_error_open: 4178 evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4179 4180 out_error: 4181 fprintf(trace->output, "%s\n", errbuf); 4182 goto out_delete_evlist; 4183 4184 out_error_apply_filters: 4185 fprintf(trace->output, 4186 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4187 evsel->filter, evsel__name(evsel), errno, 4188 str_error_r(errno, errbuf, sizeof(errbuf))); 4189 goto out_delete_evlist; 4190 } 4191 out_error_mem: 4192 fprintf(trace->output, "Not enough memory to run!\n"); 4193 goto out_delete_evlist; 4194 4195 out_errno: 4196 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4197 goto out_delete_evlist; 4198 } 4199 4200 static int trace__replay(struct trace *trace) 4201 { 4202 const struct evsel_str_handler handlers[] = { 4203 { "probe:vfs_getname", trace__vfs_getname, }, 4204 }; 4205 struct perf_data data = { 4206 .path = input_name, 4207 .mode = PERF_DATA_MODE_READ, 4208 .force = trace->force, 4209 }; 4210 struct perf_session *session; 4211 struct evsel *evsel; 4212 int err = -1; 4213 4214 trace->tool.sample = trace__process_sample; 4215 trace->tool.mmap = perf_event__process_mmap; 4216 trace->tool.mmap2 = perf_event__process_mmap2; 4217 trace->tool.comm = perf_event__process_comm; 4218 trace->tool.exit = perf_event__process_exit; 4219 trace->tool.fork = perf_event__process_fork; 4220 trace->tool.attr = perf_event__process_attr; 4221 trace->tool.tracing_data = perf_event__process_tracing_data; 4222 trace->tool.build_id = perf_event__process_build_id; 4223 trace->tool.namespaces = perf_event__process_namespaces; 4224 4225 trace->tool.ordered_events = true; 4226 trace->tool.ordering_requires_timestamps = true; 4227 4228 /* add tid to output */ 4229 trace->multiple_threads = true; 4230 4231 session = perf_session__new(&data, false, &trace->tool); 4232 if (IS_ERR(session)) 4233 return PTR_ERR(session); 4234 4235 if (trace->opts.target.pid) 4236 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4237 4238 if 
(trace->opts.target.tid) 4239 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4240 4241 if (symbol__init(&session->header.env) < 0) 4242 goto out; 4243 4244 trace->host = &session->machines.host; 4245 4246 err = perf_session__set_tracepoints_handlers(session, handlers); 4247 if (err) 4248 goto out; 4249 4250 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter"); 4251 /* older kernels have syscalls tp versus raw_syscalls */ 4252 if (evsel == NULL) 4253 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter"); 4254 4255 if (evsel && 4256 (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4257 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4258 pr_err("Error initializing raw_syscalls:sys_enter event\n"); 4259 goto out; 4260 } 4261 4262 evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit"); 4263 if (evsel == NULL) 4264 evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit"); 4265 if (evsel && 4266 (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4267 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4268 pr_err("Error initializing raw_syscalls:sys_exit event\n"); 4269 goto out; 4270 } 4271 4272 evlist__for_each_entry(session->evlist, evsel) { 4273 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4274 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4275 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4276 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4277 evsel->handler = trace__pgfault; 4278 } 4279 4280 setup_pager(); 4281 4282 err = perf_session__process_events(session); 4283 if (err) 4284 pr_err("Failed to process events, error %d\n", err); 4285 4286 else if (trace->summary) 4287 trace__fprintf_thread_summary(trace, trace->output); 4288 4289 out: 4290 perf_session__delete(session); 4291 4292 return err; 4293 } 4294 4295 static size_t trace__fprintf_threads_header(FILE *fp) 4296 { 4297 size_t printed; 4298 4299 printed = fprintf(fp, "\n Summary of events:\n\n"); 4300 4301 return printed; 4302 } 4303 4304 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs, 4305 struct syscall_stats *stats; 4306 double msecs; 4307 int syscall; 4308 ) 4309 { 4310 struct int_node *source = rb_entry(nd, struct int_node, rb_node); 4311 struct syscall_stats *stats = source->priv; 4312 4313 entry->syscall = source->i; 4314 entry->stats = stats; 4315 entry->msecs = stats ? (u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0; 4316 } 4317 4318 static size_t thread__dump_stats(struct thread_trace *ttrace, 4319 struct trace *trace, FILE *fp) 4320 { 4321 size_t printed = 0; 4322 struct syscall *sc; 4323 struct rb_node *nd; 4324 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); 4325 4326 if (syscall_stats == NULL) 4327 return 0; 4328 4329 printed += fprintf(fp, "\n"); 4330 4331 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4332 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4333 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4334 4335 resort_rb__for_each_entry(nd, syscall_stats) { 4336 struct syscall_stats *stats = syscall_stats_entry->stats; 4337 if (stats) { 4338 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4339 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4340 double avg = avg_stats(&stats->stats); 4341 double pct; 4342 u64 n = (u64)stats->stats.n; 4343 4344 pct = avg ?
100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4345 avg /= NSEC_PER_MSEC; 4346 4347 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; 4348 printed += fprintf(fp, " %-15s", sc->name); 4349 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4350 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); 4351 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4352 4353 if (trace->errno_summary && stats->nr_failures) { 4354 const char *arch_name = perf_env__arch(trace->host->env); 4355 int e; 4356 4357 for (e = 0; e < stats->max_errno; ++e) { 4358 if (stats->errnos[e] != 0) 4359 fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]); 4360 } 4361 } 4362 } 4363 } 4364 4365 resort_rb__delete(syscall_stats); 4366 printed += fprintf(fp, "\n\n"); 4367 4368 return printed; 4369 } 4370 4371 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4372 { 4373 size_t printed = 0; 4374 struct thread_trace *ttrace = thread__priv(thread); 4375 double ratio; 4376 4377 if (ttrace == NULL) 4378 return 0; 4379 4380 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4381 4382 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid); 4383 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4384 printed += fprintf(fp, "%.1f%%", ratio); 4385 if (ttrace->pfmaj) 4386 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4387 if (ttrace->pfmin) 4388 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4389 if (trace->sched) 4390 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4391 else if (fputc('\n', fp) != EOF) 4392 ++printed; 4393 4394 printed += thread__dump_stats(ttrace, trace, fp); 4395 4396 return printed; 4397 } 4398 4399 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4400 { 4401 return ttrace ? ttrace->nr_events : 0; 4402 } 4403 4404 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)), 4405 struct thread *thread; 4406 ) 4407 { 4408 entry->thread = rb_entry(nd, struct thread, rb_node); 4409 } 4410 4411 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4412 { 4413 size_t printed = trace__fprintf_threads_header(fp); 4414 struct rb_node *nd; 4415 int i; 4416 4417 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 4418 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i); 4419 4420 if (threads == NULL) { 4421 fprintf(fp, "%s", "Error sorting output by nr_events!\n"); 4422 return 0; 4423 } 4424 4425 resort_rb__for_each_entry(nd, threads) 4426 printed += trace__fprintf_thread(fp, threads_entry->thread, trace); 4427 4428 resort_rb__delete(threads); 4429 } 4430 return printed; 4431 } 4432 4433 static int trace__set_duration(const struct option *opt, const char *str, 4434 int unset __maybe_unused) 4435 { 4436 struct trace *trace = opt->value; 4437 4438 trace->duration_filter = atof(str); 4439 return 0; 4440 } 4441 4442 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 4443 int unset __maybe_unused) 4444 { 4445 int ret = -1; 4446 size_t i; 4447 struct trace *trace = opt->value; 4448 /* 4449 * FIXME: introduce a intarray class, plain parse csv and create a 4450 * { int nr, int entries[] } struct... 
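 * Something along the lines of (just a sketch, not what is implemented
 * below):
 *
 *	struct intarray {
 *		int nr;
 *		int entries[];
 *	};
 *
 * filled straight from the CSV string, instead of the intlist round trip
 * used here.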
4451 */ 4452 struct intlist *list = intlist__new(str); 4453 4454 if (list == NULL) 4455 return -1; 4456 4457 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 4458 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 4459 4460 if (trace->filter_pids.entries == NULL) 4461 goto out; 4462 4463 trace->filter_pids.entries[0] = getpid(); 4464 4465 for (i = 1; i < trace->filter_pids.nr; ++i) 4466 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 4467 4468 intlist__delete(list); 4469 ret = 0; 4470 out: 4471 return ret; 4472 } 4473 4474 static int trace__open_output(struct trace *trace, const char *filename) 4475 { 4476 struct stat st; 4477 4478 if (!stat(filename, &st) && st.st_size) { 4479 char oldname[PATH_MAX]; 4480 4481 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 4482 unlink(oldname); 4483 rename(filename, oldname); 4484 } 4485 4486 trace->output = fopen(filename, "w"); 4487 4488 return trace->output == NULL ? -errno : 0; 4489 } 4490 4491 static int parse_pagefaults(const struct option *opt, const char *str, 4492 int unset __maybe_unused) 4493 { 4494 int *trace_pgfaults = opt->value; 4495 4496 if (strcmp(str, "all") == 0) 4497 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 4498 else if (strcmp(str, "maj") == 0) 4499 *trace_pgfaults |= TRACE_PFMAJ; 4500 else if (strcmp(str, "min") == 0) 4501 *trace_pgfaults |= TRACE_PFMIN; 4502 else 4503 return -1; 4504 4505 return 0; 4506 } 4507 4508 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 4509 { 4510 struct evsel *evsel; 4511 4512 evlist__for_each_entry(evlist, evsel) { 4513 if (evsel->handler == NULL) 4514 evsel->handler = handler; 4515 } 4516 } 4517 4518 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 4519 { 4520 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 4521 4522 if (fmt) { 4523 struct syscall_fmt *scfmt = syscall_fmt__find(name); 4524 4525 if (scfmt) { 4526 int skip = 0; 4527 4528 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 || 4529 strcmp(evsel->tp_format->format.fields->name, "nr") == 0) 4530 ++skip; 4531 4532 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt)); 4533 } 4534 } 4535 } 4536 4537 static int evlist__set_syscall_tp_fields(struct evlist *evlist) 4538 { 4539 struct evsel *evsel; 4540 4541 evlist__for_each_entry(evlist, evsel) { 4542 if (evsel->priv || !evsel->tp_format) 4543 continue; 4544 4545 if (strcmp(evsel->tp_format->system, "syscalls")) { 4546 evsel__init_tp_arg_scnprintf(evsel); 4547 continue; 4548 } 4549 4550 if (evsel__init_syscall_tp(evsel)) 4551 return -1; 4552 4553 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) { 4554 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4555 4556 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 4557 return -1; 4558 4559 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1); 4560 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) { 4561 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4562 4563 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap)) 4564 return -1; 4565 4566 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1); 4567 } 4568 } 4569 4570 return 0; 4571 } 4572 4573 /* 4574 * XXX: Hackish, just splitting the combined -e+--event (syscalls 4575 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use 4576 * existing facilities unchanged 
(trace->ev_qualifier + parse_options()). 4577 * 4578 * It'd be better to introduce a parse_options() variant that would return a 4579 * list with the terms it didn't match to an event... 4580 */ 4581 static int trace__parse_events_option(const struct option *opt, const char *str, 4582 int unset __maybe_unused) 4583 { 4584 struct trace *trace = (struct trace *)opt->value; 4585 const char *s = str; 4586 char *sep = NULL, *lists[2] = { NULL, NULL, }; 4587 int len = strlen(str) + 1, err = -1, list, idx; 4588 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 4589 char group_name[PATH_MAX]; 4590 struct syscall_fmt *fmt; 4591 4592 if (strace_groups_dir == NULL) 4593 return -1; 4594 4595 if (*s == '!') { 4596 ++s; 4597 trace->not_ev_qualifier = true; 4598 } 4599 4600 while (1) { 4601 if ((sep = strchr(s, ',')) != NULL) 4602 *sep = '\0'; 4603 4604 list = 0; 4605 if (syscalltbl__id(trace->sctbl, s) >= 0 || 4606 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { 4607 list = 1; 4608 goto do_concat; 4609 } 4610 4611 fmt = syscall_fmt__find_by_alias(s); 4612 if (fmt != NULL) { 4613 list = 1; 4614 s = fmt->name; 4615 } else { 4616 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 4617 if (access(group_name, R_OK) == 0) 4618 list = 1; 4619 } 4620 do_concat: 4621 if (lists[list]) { 4622 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 4623 } else { 4624 lists[list] = malloc(len); 4625 if (lists[list] == NULL) 4626 goto out; 4627 strcpy(lists[list], s); 4628 } 4629 4630 if (!sep) 4631 break; 4632 4633 *sep = ','; 4634 s = sep + 1; 4635 } 4636 4637 if (lists[1] != NULL) { 4638 struct strlist_config slist_config = { 4639 .dirname = strace_groups_dir, 4640 }; 4641 4642 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 4643 if (trace->ev_qualifier == NULL) { 4644 fputs("Not enough memory to parse event qualifier", trace->output); 4645 goto out; 4646 } 4647 4648 if (trace__validate_ev_qualifier(trace)) 4649 goto out; 4650 trace->trace_syscalls = true; 4651 } 4652 4653 err = 0; 4654 4655 if (lists[0]) { 4656 struct option o = { 4657 .value = &trace->evlist, 4658 }; 4659 err = parse_events_option(&o, lists[0], 0); 4660 } 4661 out: 4662 free(strace_groups_dir); 4663 free(lists[0]); 4664 free(lists[1]); 4665 if (sep) 4666 *sep = ','; 4667 4668 return err; 4669 } 4670 4671 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 4672 { 4673 struct trace *trace = opt->value; 4674 4675 if (!list_empty(&trace->evlist->core.entries)) { 4676 struct option o = { 4677 .value = &trace->evlist, 4678 }; 4679 return parse_cgroups(&o, str, unset); 4680 } 4681 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 4682 4683 return 0; 4684 } 4685 4686 static int trace__config(const char *var, const char *value, void *arg) 4687 { 4688 struct trace *trace = arg; 4689 int err = 0; 4690 4691 if (!strcmp(var, "trace.add_events")) { 4692 trace->perfconfig_events = strdup(value); 4693 if (trace->perfconfig_events == NULL) { 4694 pr_err("Not enough memory for %s\n", "trace.add_events"); 4695 return -1; 4696 } 4697 } else if (!strcmp(var, "trace.show_timestamp")) { 4698 trace->show_tstamp = perf_config_bool(var, value); 4699 } else if (!strcmp(var, "trace.show_duration")) { 4700 trace->show_duration = perf_config_bool(var, value); 4701 } else if (!strcmp(var, "trace.show_arg_names")) { 4702 trace->show_arg_names = perf_config_bool(var, value); 4703 if (!trace->show_arg_names) 4704 trace->show_zeros = true; 4705 } else if (!strcmp(var, "trace.show_zeros")) 
{ 4706 bool new_show_zeros = perf_config_bool(var, value); 4707 if (!trace->show_arg_names && !new_show_zeros) { 4708 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 4709 goto out; 4710 } 4711 trace->show_zeros = new_show_zeros; 4712 } else if (!strcmp(var, "trace.show_prefix")) { 4713 trace->show_string_prefix = perf_config_bool(var, value); 4714 } else if (!strcmp(var, "trace.no_inherit")) { 4715 trace->opts.no_inherit = perf_config_bool(var, value); 4716 } else if (!strcmp(var, "trace.args_alignment")) { 4717 int args_alignment = 0; 4718 if (perf_config_int(&args_alignment, var, value) == 0) 4719 trace->args_alignment = args_alignment; 4720 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 4721 if (strcasecmp(value, "libtraceevent") == 0) 4722 trace->libtraceevent_print = true; 4723 else if (strcasecmp(value, "libbeauty") == 0) 4724 trace->libtraceevent_print = false; 4725 } 4726 out: 4727 return err; 4728 } 4729 4730 static void trace__exit(struct trace *trace) 4731 { 4732 int i; 4733 4734 strlist__delete(trace->ev_qualifier); 4735 free(trace->ev_qualifier_ids.entries); 4736 if (trace->syscalls.table) { 4737 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++) 4738 syscall__exit(&trace->syscalls.table[i]); 4739 free(trace->syscalls.table); 4740 } 4741 syscalltbl__delete(trace->sctbl); 4742 zfree(&trace->perfconfig_events); 4743 } 4744 4745 int cmd_trace(int argc, const char **argv) 4746 { 4747 const char *trace_usage[] = { 4748 "perf trace [<options>] [<command>]", 4749 "perf trace [<options>] -- <command> [<options>]", 4750 "perf trace record [<options>] [<command>]", 4751 "perf trace record [<options>] -- <command> [<options>]", 4752 NULL 4753 }; 4754 struct trace trace = { 4755 .opts = { 4756 .target = { 4757 .uid = UINT_MAX, 4758 .uses_mmap = true, 4759 }, 4760 .user_freq = UINT_MAX, 4761 .user_interval = ULLONG_MAX, 4762 .no_buffering = true, 4763 .mmap_pages = UINT_MAX, 4764 }, 4765 .output = stderr, 4766 .show_comm = true, 4767 .show_tstamp = true, 4768 .show_duration = true, 4769 .show_arg_names = true, 4770 .args_alignment = 70, 4771 .trace_syscalls = false, 4772 .kernel_syscallchains = false, 4773 .max_stack = UINT_MAX, 4774 .max_events = ULONG_MAX, 4775 }; 4776 const char *map_dump_str = NULL; 4777 const char *output_name = NULL; 4778 const struct option trace_options[] = { 4779 OPT_CALLBACK('e', "event", &trace, "event", 4780 "event/syscall selector. 
use 'perf list' to list available events", 4781 trace__parse_events_option), 4782 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 4783 "event filter", parse_filter), 4784 OPT_BOOLEAN(0, "comm", &trace.show_comm, 4785 "show the thread COMM next to its id"), 4786 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 4787 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 4788 trace__parse_events_option), 4789 OPT_STRING('o', "output", &output_name, "file", "output file name"), 4790 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 4791 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 4792 "trace events on existing process id"), 4793 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 4794 "trace events on existing thread id"), 4795 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 4796 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 4797 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 4798 "system-wide collection from all CPUs"), 4799 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 4800 "list of cpus to monitor"), 4801 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 4802 "child tasks do not inherit counters"), 4803 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 4804 "number of mmap data pages", evlist__parse_mmap_pages), 4805 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 4806 "user to profile"), 4807 OPT_CALLBACK(0, "duration", &trace, "float", 4808 "show only events with duration > N.M ms", 4809 trace__set_duration), 4810 #ifdef HAVE_LIBBPF_SUPPORT 4811 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"), 4812 #endif 4813 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 4814 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 4815 OPT_BOOLEAN('T', "time", &trace.full_time, 4816 "Show full timestamp, not time relative to first start"), 4817 OPT_BOOLEAN(0, "failure", &trace.failure_only, 4818 "Show only syscalls that failed"), 4819 OPT_BOOLEAN('s', "summary", &trace.summary_only, 4820 "Show only syscall summary with statistics"), 4821 OPT_BOOLEAN('S', "with-summary", &trace.summary, 4822 "Show all syscalls and summary with statistics"), 4823 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 4824 "Show errno stats per syscall, use with -s or -S"), 4825 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 4826 "Trace pagefaults", parse_pagefaults, "maj"), 4827 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 4828 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 4829 OPT_CALLBACK(0, "call-graph", &trace.opts, 4830 "record_mode[,record_size]", record_callchain_help, 4831 &record_parse_callchain_opt), 4832 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 4833 "Use libtraceevent to print the tracepoint arguments."), 4834 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 4835 "Show the kernel callchains on the syscall exit path"), 4836 OPT_ULONG(0, "max-events", &trace.max_events, 4837 "Set the maximum number of events to print, exit after that is reached. 
"), 4838 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 4839 "Set the minimum stack depth when parsing the callchain, " 4840 "anything below the specified depth will be ignored."), 4841 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 4842 "Set the maximum stack depth when parsing the callchain, " 4843 "anything beyond the specified depth will be ignored. " 4844 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 4845 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 4846 "Sort batch of events before processing, use if getting out of order events"), 4847 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 4848 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 4849 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 4850 "per thread proc mmap processing timeout in ms"), 4851 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 4852 trace__parse_cgroups), 4853 OPT_INTEGER('D', "delay", &trace.opts.initial_delay, 4854 "ms to wait before starting measurement after program " 4855 "start"), 4856 OPTS_EVSWITCH(&trace.evswitch), 4857 OPT_END() 4858 }; 4859 bool __maybe_unused max_stack_user_set = true; 4860 bool mmap_pages_user_set = true; 4861 struct evsel *evsel; 4862 const char * const trace_subcommands[] = { "record", NULL }; 4863 int err = -1; 4864 char bf[BUFSIZ]; 4865 4866 signal(SIGSEGV, sighandler_dump_stack); 4867 signal(SIGFPE, sighandler_dump_stack); 4868 signal(SIGCHLD, sig_handler); 4869 signal(SIGINT, sig_handler); 4870 4871 trace.evlist = evlist__new(); 4872 trace.sctbl = syscalltbl__new(); 4873 4874 if (trace.evlist == NULL || trace.sctbl == NULL) { 4875 pr_err("Not enough memory to run!\n"); 4876 err = -ENOMEM; 4877 goto out; 4878 } 4879 4880 /* 4881 * Parsing .perfconfig may entail creating a BPF event, that may need 4882 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 4883 * is too small. This affects just this process, not touching the 4884 * global setting. If it fails we'll get something in 'perf trace -v' 4885 * to help diagnose the problem. 4886 */ 4887 rlimit__bump_memlock(); 4888 4889 err = perf_config(trace__config, &trace); 4890 if (err) 4891 goto out; 4892 4893 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 4894 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 4895 4896 /* 4897 * Here we already passed thru trace__parse_events_option() and it has 4898 * already figured out if -e syscall_name, if not but if --event 4899 * foo:bar was used, the user is interested _just_ in those, say, 4900 * tracepoint events, not in the strace-like syscall-name-based mode. 4901 * 4902 * This is important because we need to check if strace-like mode is 4903 * needed to decided if we should filter out the eBPF 4904 * __augmented_syscalls__ code, if it is in the mix, say, via 4905 * .perfconfig trace.add_events, and filter those out. 4906 */ 4907 if (!trace.trace_syscalls && !trace.trace_pgfaults && 4908 trace.evlist->core.nr_entries == 0 /* Was --events used? */) { 4909 trace.trace_syscalls = true; 4910 } 4911 /* 4912 * Now that we have --verbose figured out, lets see if we need to parse 4913 * events from .perfconfig, so that if those events fail parsing, say some 4914 * BPF program fails, then we'll be able to use --verbose to see what went 4915 * wrong in more detail. 
4916 */ 4917 if (trace.perfconfig_events != NULL) { 4918 struct parse_events_error parse_err; 4919 4920 bzero(&parse_err, sizeof(parse_err)); 4921 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 4922 if (err) { 4923 parse_events_print_error(&parse_err, trace.perfconfig_events); 4924 goto out; 4925 } 4926 } 4927 4928 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { 4929 usage_with_options_msg(trace_usage, trace_options, 4930 "cgroup monitoring only available in system-wide mode"); 4931 } 4932 4933 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__"); 4934 if (IS_ERR(evsel)) { 4935 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf)); 4936 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf); 4937 goto out; 4938 } 4939 4940 if (evsel) { 4941 trace.syscalls.events.augmented = evsel; 4942 4943 evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter"); 4944 if (evsel == NULL) { 4945 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n"); 4946 goto out; 4947 } 4948 4949 if (evsel->bpf_obj == NULL) { 4950 pr_err("ERROR: raw_syscalls:sys_enter not associated with a BPF object\n"); 4951 goto out; 4952 } 4953 4954 trace.bpf_obj = evsel->bpf_obj; 4955 4956 /* 4957 * If we have _just_ the augmenter event but don't have an 4958 * explicit --syscalls, then assume we want all strace-like 4959 * syscalls: 4960 */ 4961 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace)) 4962 trace.trace_syscalls = true; 4963 /* 4964 * So, if we have a syscall augmenter, but trace_syscalls, aka 4965 * strace-like syscall tracing is not set, then we need to throw 4966 * away the augmenter, i.e. all the events that were created 4967 * from that BPF object file. 4968 * 4969 * This is more to fix the current .perfconfig trace.add_events 4970 * style of setting up the strace-like eBPF based syscall point 4971 * payload augmenter. 4972 * 4973 * All this complexity will be avoided by adding an alternative 4974 * to trace.add_events in the form of 4975 * trace.bpf_augmented_syscalls, that will only be parsed if we 4976 * need it. 4977 * 4978 * .perfconfig trace.add_events is still useful if we want, for 4979 * instance, to have msr_write.msr in some .perfconfig profile based 4980 * 'perf trace --config determinism.profile' mode, where for some 4981 * particular goal/workload type we want a set of events and 4982 * output mode (with timings, etc) instead of having to add 4983 * all via the command line. 4984 * 4985 * Also --config to specify an alternate .perfconfig file needs 4986 * to be implemented.
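 * For reference, the trace.add_events knob handled in trace__config() comes
 * from a ~/.perfconfig entry along the lines of:
 *
 *	[trace]
 *		add_events = <events in the syntax accepted by parse_events()>
 *
 * which lands in trace.perfconfig_events and gets parsed above.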
4987 */ 4988 if (!trace.trace_syscalls) { 4989 trace__delete_augmented_syscalls(&trace); 4990 } else { 4991 trace__set_bpf_map_filtered_pids(&trace); 4992 trace__set_bpf_map_syscalls(&trace); 4993 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented"); 4994 } 4995 } 4996 4997 err = bpf__setup_stdout(trace.evlist); 4998 if (err) { 4999 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf)); 5000 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf); 5001 goto out; 5002 } 5003 5004 err = -1; 5005 5006 if (map_dump_str) { 5007 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str); 5008 if (trace.dump.map == NULL) { 5009 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str); 5010 goto out; 5011 } 5012 } 5013 5014 if (trace.trace_pgfaults) { 5015 trace.opts.sample_address = true; 5016 trace.opts.sample_time = true; 5017 } 5018 5019 if (trace.opts.mmap_pages == UINT_MAX) 5020 mmap_pages_user_set = false; 5021 5022 if (trace.max_stack == UINT_MAX) { 5023 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack(); 5024 max_stack_user_set = false; 5025 } 5026 5027 #ifdef HAVE_DWARF_UNWIND_SUPPORT 5028 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { 5029 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 5030 } 5031 #endif 5032 5033 if (callchain_param.enabled) { 5034 if (!mmap_pages_user_set && geteuid() == 0) 5035 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; 5036 5037 symbol_conf.use_callchain = true; 5038 } 5039 5040 if (trace.evlist->core.nr_entries > 0) { 5041 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); 5042 if (evlist__set_syscall_tp_fields(trace.evlist)) { 5043 perror("failed to set syscalls:* tracepoint fields"); 5044 goto out; 5045 } 5046 } 5047 5048 if (trace.sort_events) { 5049 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 5050 ordered_events__set_copy_on_queue(&trace.oe.data, true); 5051 } 5052 5053 /* 5054 * If we are augmenting syscalls, then combine what we put in the 5055 * __augmented_syscalls__ BPF map with what is in the 5056 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF, 5057 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit. 5058 * 5059 * We'll switch to look at two BPF maps, one for sys_enter and the 5060 * other for sys_exit when we start augmenting the sys_exit paths with 5061 * buffers that are being copied from kernel to userspace, think 'read' 5062 * syscall. 5063 */ 5064 if (trace.syscalls.events.augmented) { 5065 evlist__for_each_entry(trace.evlist, evsel) { 5066 bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0; 5067 5068 if (raw_syscalls_sys_exit) { 5069 trace.raw_augmented_syscalls = true; 5070 goto init_augmented_syscall_tp; 5071 } 5072 5073 if (trace.syscalls.events.augmented->priv == NULL && 5074 strstr(evsel__name(evsel), "syscalls:sys_enter")) { 5075 struct evsel *augmented = trace.syscalls.events.augmented; 5076 if (evsel__init_augmented_syscall_tp(augmented, evsel) || 5077 evsel__init_augmented_syscall_tp_args(augmented)) 5078 goto out; 5079 /* 5080 * Augmented is __augmented_syscalls__ BPF_OUTPUT event 5081 * Above we made sure we can get from the payload the tp fields 5082 * that we get from syscalls:sys_enter tracefs format file. 
5083 */ 5084 augmented->handler = trace__sys_enter; 5085 /* 5086 * Now we do the same for the *syscalls:sys_enter event so that 5087 * if we handle it directly, i.e. if the BPF prog returns 0 so 5088 * as not to filter it, then we'll handle it just like we would 5089 * for the BPF_OUTPUT one: 5090 */ 5091 if (evsel__init_augmented_syscall_tp(evsel, evsel) || 5092 evsel__init_augmented_syscall_tp_args(evsel)) 5093 goto out; 5094 evsel->handler = trace__sys_enter; 5095 } 5096 5097 if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) { 5098 struct syscall_tp *sc; 5099 init_augmented_syscall_tp: 5100 if (evsel__init_augmented_syscall_tp(evsel, evsel)) 5101 goto out; 5102 sc = __evsel__syscall_tp(evsel); 5103 /* 5104 * For now with BPF raw_augmented we hook into 5105 * raw_syscalls:sys_enter and there we get all 5106 * 6 syscall args plus the tracepoint common 5107 * fields and the syscall_nr (another long). 5108 * So we check if that is the case and if so 5109 * don't look after the sc->args_size but 5110 * always after the full raw_syscalls:sys_enter 5111 * payload, which is fixed. 5112 * 5113 * We'll revisit this later to pass 5114 * s->args_size to the BPF augmenter (now 5115 * tools/perf/examples/bpf/augmented_raw_syscalls.c, 5116 * so that it copies only what we need for each 5117 * syscall, like what happens when we use 5118 * syscalls:sys_enter_NAME, so that we reduce 5119 * the kernel/userspace traffic to just what is 5120 * needed for each syscall. 5121 */ 5122 if (trace.raw_augmented_syscalls) 5123 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; 5124 evsel__init_augmented_syscall_tp_ret(evsel); 5125 evsel->handler = trace__sys_exit; 5126 } 5127 } 5128 } 5129 5130 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) 5131 return trace__record(&trace, argc-1, &argv[1]); 5132 5133 /* Using just --errno-summary will trigger --summary */ 5134 if (trace.errno_summary && !trace.summary && !trace.summary_only) 5135 trace.summary_only = true; 5136 5137 /* summary_only implies summary option, but don't overwrite summary if set */ 5138 if (trace.summary_only) 5139 trace.summary = trace.summary_only; 5140 5141 if (output_name != NULL) { 5142 err = trace__open_output(&trace, output_name); 5143 if (err < 0) { 5144 perror("failed to create output file"); 5145 goto out; 5146 } 5147 } 5148 5149 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); 5150 if (err) 5151 goto out_close; 5152 5153 err = target__validate(&trace.opts.target); 5154 if (err) { 5155 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5156 fprintf(trace.output, "%s", bf); 5157 goto out_close; 5158 } 5159 5160 err = target__parse_uid(&trace.opts.target); 5161 if (err) { 5162 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5163 fprintf(trace.output, "%s", bf); 5164 goto out_close; 5165 } 5166 5167 if (!argc && target__none(&trace.opts.target)) 5168 trace.opts.target.system_wide = true; 5169 5170 if (input_name) 5171 err = trace__replay(&trace); 5172 else 5173 err = trace__run(&trace, argc, argv); 5174 5175 out_close: 5176 if (output_name != NULL) 5177 fclose(trace.output); 5178 out: 5179 trace__exit(&trace); 5180 return err; 5181 } 5182