/*
 * builtin-trace.c
 *
 * Builtin 'trace' command:
 *
 * Display a continuously updated trace of any workload, CPU, specific PID,
 * system wide, etc. Default format is loosely strace like, but any other
 * event may be specified using --event.
 *
 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Initially based on the 'trace' prototype by Thomas Gleixner:
 *
 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
 */

#include "util/record.h"
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
#include <bpf/bpf.h>
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
#include "util/config.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/env.h"
#include "util/event.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/synthetic-events.h"
#include "util/evlist.h"
#include "util/evswitch.h"
#include "util/mmap.h"
#include <subcmd/pager.h>
#include <subcmd/exec-cmd.h>
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/path.h"
#include "util/session.h"
#include "util/thread.h"
#include <subcmd/parse-options.h>
#include "util/strlist.h"
#include "util/intlist.h"
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/tool.h"
#include "util/util.h"
#include "trace/beauty/beauty.h"
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
#include "syscalltbl.h"
#include "rb_resort.h"
#include "../perf.h"

#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <fcntl.h>
#include <sys/sysmacros.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

#ifndef O_CLOEXEC
# define O_CLOEXEC		02000000
#endif

#ifndef F_LINUX_SPECIFIC_BASE
# define F_LINUX_SPECIFIC_BASE	1024
#endif

/*
 * strtoul: Go from a string to a value, i.e.
for msr: MSR_FS_BASE to 0xc0000100 92 */ 93 struct syscall_arg_fmt { 94 size_t (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg); 95 bool (*strtoul)(char *bf, size_t size, struct syscall_arg *arg, u64 *val); 96 unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val); 97 void *parm; 98 const char *name; 99 u16 nr_entries; // for arrays 100 bool show_zero; 101 }; 102 103 struct syscall_fmt { 104 const char *name; 105 const char *alias; 106 struct { 107 const char *sys_enter, 108 *sys_exit; 109 } bpf_prog_name; 110 struct syscall_arg_fmt arg[6]; 111 u8 nr_args; 112 bool errpid; 113 bool timeout; 114 bool hexret; 115 }; 116 117 struct trace { 118 struct perf_tool tool; 119 struct syscalltbl *sctbl; 120 struct { 121 struct syscall *table; 122 struct bpf_map *map; 123 struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY 124 struct bpf_map *sys_enter, 125 *sys_exit; 126 } prog_array; 127 struct { 128 struct evsel *sys_enter, 129 *sys_exit, 130 *augmented; 131 } events; 132 struct bpf_program *unaugmented_prog; 133 } syscalls; 134 struct { 135 struct bpf_map *map; 136 } dump; 137 struct record_opts opts; 138 struct evlist *evlist; 139 struct machine *host; 140 struct thread *current; 141 struct bpf_object *bpf_obj; 142 struct cgroup *cgroup; 143 u64 base_time; 144 FILE *output; 145 unsigned long nr_events; 146 unsigned long nr_events_printed; 147 unsigned long max_events; 148 struct evswitch evswitch; 149 struct strlist *ev_qualifier; 150 struct { 151 size_t nr; 152 int *entries; 153 } ev_qualifier_ids; 154 struct { 155 size_t nr; 156 pid_t *entries; 157 struct bpf_map *map; 158 } filter_pids; 159 double duration_filter; 160 double runtime_ms; 161 struct { 162 u64 vfs_getname, 163 proc_getname; 164 } stats; 165 unsigned int max_stack; 166 unsigned int min_stack; 167 int raw_augmented_syscalls_args_size; 168 bool raw_augmented_syscalls; 169 bool fd_path_disabled; 170 bool sort_events; 171 bool not_ev_qualifier; 172 bool live; 173 bool full_time; 174 bool sched; 175 bool multiple_threads; 176 bool summary; 177 bool summary_only; 178 bool errno_summary; 179 bool failure_only; 180 bool show_comm; 181 bool print_sample; 182 bool show_tool_stats; 183 bool trace_syscalls; 184 bool libtraceevent_print; 185 bool kernel_syscallchains; 186 s16 args_alignment; 187 bool show_tstamp; 188 bool show_duration; 189 bool show_zeros; 190 bool show_arg_names; 191 bool show_string_prefix; 192 bool force; 193 bool vfs_getname; 194 int trace_pgfaults; 195 char *perfconfig_events; 196 struct { 197 struct ordered_events data; 198 u64 last; 199 } oe; 200 }; 201 202 struct tp_field { 203 int offset; 204 union { 205 u64 (*integer)(struct tp_field *field, struct perf_sample *sample); 206 void *(*pointer)(struct tp_field *field, struct perf_sample *sample); 207 }; 208 }; 209 210 #define TP_UINT_FIELD(bits) \ 211 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \ 212 { \ 213 u##bits value; \ 214 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 215 return value; \ 216 } 217 218 TP_UINT_FIELD(8); 219 TP_UINT_FIELD(16); 220 TP_UINT_FIELD(32); 221 TP_UINT_FIELD(64); 222 223 #define TP_UINT_FIELD__SWAPPED(bits) \ 224 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \ 225 { \ 226 u##bits value; \ 227 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \ 228 return bswap_##bits(value);\ 229 } 230 231 TP_UINT_FIELD__SWAPPED(16); 232 TP_UINT_FIELD__SWAPPED(32); 233 TP_UINT_FIELD__SWAPPED(64); 234 235 static int 
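/*
 * __tp_field__init_uint() below picks one of the readers generated by the
 * TP_UINT_FIELD()/TP_UINT_FIELD__SWAPPED() macros above according to the
 * field size, using the byte-swapping variant when the data was recorded on
 * a host with different endianness.  A minimal sketch of how a caller might
 * use it (the offsets and variable names here are illustrative only):
 *
 *	struct tp_field fd_field;
 *
 *	if (__tp_field__init_uint(&fd_field, 4, 8, evsel->needs_swap) == 0) {
 *		// later, for each sample: read the u32 at raw_data + 8
 *		u64 fd = fd_field.integer(&fd_field, sample);
 *	}
 */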
__tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap) 236 { 237 field->offset = offset; 238 239 switch (size) { 240 case 1: 241 field->integer = tp_field__u8; 242 break; 243 case 2: 244 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16; 245 break; 246 case 4: 247 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32; 248 break; 249 case 8: 250 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64; 251 break; 252 default: 253 return -1; 254 } 255 256 return 0; 257 } 258 259 static int tp_field__init_uint(struct tp_field *field, struct tep_format_field *format_field, bool needs_swap) 260 { 261 return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap); 262 } 263 264 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample) 265 { 266 return sample->raw_data + field->offset; 267 } 268 269 static int __tp_field__init_ptr(struct tp_field *field, int offset) 270 { 271 field->offset = offset; 272 field->pointer = tp_field__ptr; 273 return 0; 274 } 275 276 static int tp_field__init_ptr(struct tp_field *field, struct tep_format_field *format_field) 277 { 278 return __tp_field__init_ptr(field, format_field->offset); 279 } 280 281 struct syscall_tp { 282 struct tp_field id; 283 union { 284 struct tp_field args, ret; 285 }; 286 }; 287 288 /* 289 * The evsel->priv as used by 'perf trace' 290 * sc: for raw_syscalls:sys_{enter,exit} and syscalls:sys_{enter,exit}_SYSCALLNAME 291 * fmt: for all the other tracepoints 292 */ 293 struct evsel_trace { 294 struct syscall_tp sc; 295 struct syscall_arg_fmt *fmt; 296 }; 297 298 static struct evsel_trace *evsel_trace__new(void) 299 { 300 return zalloc(sizeof(struct evsel_trace)); 301 } 302 303 static void evsel_trace__delete(struct evsel_trace *et) 304 { 305 if (et == NULL) 306 return; 307 308 zfree(&et->fmt); 309 free(et); 310 } 311 312 /* 313 * Used with raw_syscalls:sys_{enter,exit} and with the 314 * syscalls:sys_{enter,exit}_SYSCALL tracepoints 315 */ 316 static inline struct syscall_tp *__evsel__syscall_tp(struct evsel *evsel) 317 { 318 struct evsel_trace *et = evsel->priv; 319 320 return &et->sc; 321 } 322 323 static struct syscall_tp *evsel__syscall_tp(struct evsel *evsel) 324 { 325 if (evsel->priv == NULL) { 326 evsel->priv = evsel_trace__new(); 327 if (evsel->priv == NULL) 328 return NULL; 329 } 330 331 return __evsel__syscall_tp(evsel); 332 } 333 334 /* 335 * Used with all the other tracepoints. 
336 */ 337 static inline struct syscall_arg_fmt *__evsel__syscall_arg_fmt(struct evsel *evsel) 338 { 339 struct evsel_trace *et = evsel->priv; 340 341 return et->fmt; 342 } 343 344 static struct syscall_arg_fmt *evsel__syscall_arg_fmt(struct evsel *evsel) 345 { 346 struct evsel_trace *et = evsel->priv; 347 348 if (evsel->priv == NULL) { 349 et = evsel->priv = evsel_trace__new(); 350 351 if (et == NULL) 352 return NULL; 353 } 354 355 if (et->fmt == NULL) { 356 et->fmt = calloc(evsel->tp_format->format.nr_fields, sizeof(struct syscall_arg_fmt)); 357 if (et->fmt == NULL) 358 goto out_delete; 359 } 360 361 return __evsel__syscall_arg_fmt(evsel); 362 363 out_delete: 364 evsel_trace__delete(evsel->priv); 365 evsel->priv = NULL; 366 return NULL; 367 } 368 369 static int perf_evsel__init_tp_uint_field(struct evsel *evsel, 370 struct tp_field *field, 371 const char *name) 372 { 373 struct tep_format_field *format_field = perf_evsel__field(evsel, name); 374 375 if (format_field == NULL) 376 return -1; 377 378 return tp_field__init_uint(field, format_field, evsel->needs_swap); 379 } 380 381 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \ 382 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\ 383 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); }) 384 385 static int perf_evsel__init_tp_ptr_field(struct evsel *evsel, 386 struct tp_field *field, 387 const char *name) 388 { 389 struct tep_format_field *format_field = perf_evsel__field(evsel, name); 390 391 if (format_field == NULL) 392 return -1; 393 394 return tp_field__init_ptr(field, format_field); 395 } 396 397 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \ 398 ({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\ 399 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); }) 400 401 static void evsel__delete_priv(struct evsel *evsel) 402 { 403 zfree(&evsel->priv); 404 evsel__delete(evsel); 405 } 406 407 static int perf_evsel__init_syscall_tp(struct evsel *evsel) 408 { 409 struct syscall_tp *sc = evsel__syscall_tp(evsel); 410 411 if (sc != NULL) { 412 if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") && 413 perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr")) 414 return -ENOENT; 415 return 0; 416 } 417 418 return -ENOMEM; 419 } 420 421 static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp) 422 { 423 struct syscall_tp *sc = evsel__syscall_tp(evsel); 424 425 if (sc != NULL) { 426 struct tep_format_field *syscall_id = perf_evsel__field(tp, "id"); 427 if (syscall_id == NULL) 428 syscall_id = perf_evsel__field(tp, "__syscall_nr"); 429 if (syscall_id == NULL || 430 __tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap)) 431 return -EINVAL; 432 433 return 0; 434 } 435 436 return -ENOMEM; 437 } 438 439 static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel) 440 { 441 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 442 443 return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)); 444 } 445 446 static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel) 447 { 448 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 449 450 return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap); 451 } 452 453 static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler) 454 { 455 if (evsel__syscall_tp(evsel) != NULL) { 456 if (perf_evsel__init_sc_tp_uint_field(evsel, id)) 457 return -ENOENT; 458 459 evsel->handler = handler; 460 return 0; 461 } 462 463 
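/*
 * Reaching this point means evsel__syscall_tp() could not lazily allocate
 * the evsel->priv 'struct evsel_trace' that carries the tracepoint field
 * accessors.  A rough picture of the lazy-init pattern used above (flow
 * illustrative, functions as defined in this file):
 *
 *	evsel->priv == NULL
 *	  -> evsel_trace__new()             zalloc's a struct evsel_trace
 *	  -> __evsel__syscall_tp(evsel)     returns &et->sc for raw_syscalls
 *	  -> evsel__syscall_arg_fmt(evsel)  calloc's ->fmt for other tracepoints
 */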
return -ENOMEM; 464 } 465 466 static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler) 467 { 468 struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction); 469 470 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */ 471 if (IS_ERR(evsel)) 472 evsel = perf_evsel__newtp("syscalls", direction); 473 474 if (IS_ERR(evsel)) 475 return NULL; 476 477 if (perf_evsel__init_raw_syscall_tp(evsel, handler)) 478 goto out_delete; 479 480 return evsel; 481 482 out_delete: 483 evsel__delete_priv(evsel); 484 return NULL; 485 } 486 487 #define perf_evsel__sc_tp_uint(evsel, name, sample) \ 488 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \ 489 fields->name.integer(&fields->name, sample); }) 490 491 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \ 492 ({ struct syscall_tp *fields = __evsel__syscall_tp(evsel); \ 493 fields->name.pointer(&fields->name, sample); }) 494 495 size_t strarray__scnprintf_suffix(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_suffix, int val) 496 { 497 int idx = val - sa->offset; 498 499 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { 500 size_t printed = scnprintf(bf, size, intfmt, val); 501 if (show_suffix) 502 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); 503 return printed; 504 } 505 506 return scnprintf(bf, size, "%s%s", sa->entries[idx], show_suffix ? sa->prefix : ""); 507 } 508 509 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val) 510 { 511 int idx = val - sa->offset; 512 513 if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { 514 size_t printed = scnprintf(bf, size, intfmt, val); 515 if (show_prefix) 516 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix); 517 return printed; 518 } 519 520 return scnprintf(bf, size, "%s%s", show_prefix ? 
sa->prefix : "", sa->entries[idx]); 521 } 522 523 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size, 524 const char *intfmt, 525 struct syscall_arg *arg) 526 { 527 return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val); 528 } 529 530 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size, 531 struct syscall_arg *arg) 532 { 533 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg); 534 } 535 536 #define SCA_STRARRAY syscall_arg__scnprintf_strarray 537 538 bool syscall_arg__strtoul_strarray(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 539 { 540 return strarray__strtoul(arg->parm, bf, size, ret); 541 } 542 543 bool syscall_arg__strtoul_strarray_flags(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 544 { 545 return strarray__strtoul_flags(arg->parm, bf, size, ret); 546 } 547 548 bool syscall_arg__strtoul_strarrays(char *bf, size_t size, struct syscall_arg *arg, u64 *ret) 549 { 550 return strarrays__strtoul(arg->parm, bf, size, ret); 551 } 552 553 size_t syscall_arg__scnprintf_strarray_flags(char *bf, size_t size, struct syscall_arg *arg) 554 { 555 return strarray__scnprintf_flags(arg->parm, bf, size, arg->show_string_prefix, arg->val); 556 } 557 558 size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val) 559 { 560 size_t printed; 561 int i; 562 563 for (i = 0; i < sas->nr_entries; ++i) { 564 struct strarray *sa = sas->entries[i]; 565 int idx = val - sa->offset; 566 567 if (idx >= 0 && idx < sa->nr_entries) { 568 if (sa->entries[idx] == NULL) 569 break; 570 return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); 571 } 572 } 573 574 printed = scnprintf(bf, size, intfmt, val); 575 if (show_prefix) 576 printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix); 577 return printed; 578 } 579 580 bool strarray__strtoul(struct strarray *sa, char *bf, size_t size, u64 *ret) 581 { 582 int i; 583 584 for (i = 0; i < sa->nr_entries; ++i) { 585 if (sa->entries[i] && strncmp(sa->entries[i], bf, size) == 0 && sa->entries[i][size] == '\0') { 586 *ret = sa->offset + i; 587 return true; 588 } 589 } 590 591 return false; 592 } 593 594 bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *ret) 595 { 596 u64 val = 0; 597 char *tok = bf, *sep, *end; 598 599 *ret = 0; 600 601 while (size != 0) { 602 int toklen = size; 603 604 sep = memchr(tok, '|', size); 605 if (sep != NULL) { 606 size -= sep - tok + 1; 607 608 end = sep - 1; 609 while (end > tok && isspace(*end)) 610 --end; 611 612 toklen = end - tok + 1; 613 } 614 615 while (isspace(*tok)) 616 ++tok; 617 618 if (isalpha(*tok) || *tok == '_') { 619 if (!strarray__strtoul(sa, tok, toklen, &val)) 620 return false; 621 } else { 622 bool is_hexa = tok[0] == 0 && (tok[1] = 'x' || tok[1] == 'X'); 623 624 val = strtoul(tok, NULL, is_hexa ? 
16 : 0); 625 } 626 627 *ret |= (1 << (val - 1)); 628 629 if (sep == NULL) 630 break; 631 tok = sep + 1; 632 } 633 634 return true; 635 } 636 637 bool strarrays__strtoul(struct strarrays *sas, char *bf, size_t size, u64 *ret) 638 { 639 int i; 640 641 for (i = 0; i < sas->nr_entries; ++i) { 642 struct strarray *sa = sas->entries[i]; 643 644 if (strarray__strtoul(sa, bf, size, ret)) 645 return true; 646 } 647 648 return false; 649 } 650 651 size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size, 652 struct syscall_arg *arg) 653 { 654 return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val); 655 } 656 657 #ifndef AT_FDCWD 658 #define AT_FDCWD -100 659 #endif 660 661 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size, 662 struct syscall_arg *arg) 663 { 664 int fd = arg->val; 665 const char *prefix = "AT_FD"; 666 667 if (fd == AT_FDCWD) 668 return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD"); 669 670 return syscall_arg__scnprintf_fd(bf, size, arg); 671 } 672 673 #define SCA_FDAT syscall_arg__scnprintf_fd_at 674 675 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 676 struct syscall_arg *arg); 677 678 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd 679 680 size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg) 681 { 682 return scnprintf(bf, size, "%#lx", arg->val); 683 } 684 685 size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg) 686 { 687 if (arg->val == 0) 688 return scnprintf(bf, size, "NULL"); 689 return syscall_arg__scnprintf_hex(bf, size, arg); 690 } 691 692 size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg) 693 { 694 return scnprintf(bf, size, "%d", arg->val); 695 } 696 697 size_t syscall_arg__scnprintf_long(char *bf, size_t size, struct syscall_arg *arg) 698 { 699 return scnprintf(bf, size, "%ld", arg->val); 700 } 701 702 static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct syscall_arg *arg) 703 { 704 // XXX Hey, maybe for sched:sched_switch prev/next comm fields we can 705 // fill missing comms using thread__set_comm()... 706 // here or in a special syscall_arg__scnprintf_pid_sched_tp... 
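/*
 * The strarray printers above map integer syscall arguments to symbolic
 * names and, via the strtoul direction, back again (used when parsing
 * user-supplied filter expressions).  A small illustration with the bpf_cmd
 * array defined just below:
 *
 *	char name[] = "PROG_LOAD";
 *	char buf[32];
 *	u64 val;
 *
 *	strarray__scnprintf(&strarray__bpf_cmd, buf, sizeof(buf), "%d", false, 5);
 *	// buf now holds "PROG_LOAD", i.e. entry 5, with the "BPF_" prefix omitted
 *	strarray__strtoul(&strarray__bpf_cmd, name, strlen(name), &val);
 *	// val is 5 again, suitable for building a filter on the cmd argument
 */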
707 return scnprintf(bf, size, "\"%-.*s\"", arg->fmt->nr_entries ?: arg->len, arg->val); 708 } 709 710 #define SCA_CHAR_ARRAY syscall_arg__scnprintf_char_array 711 712 static const char *bpf_cmd[] = { 713 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM", 714 "MAP_GET_NEXT_KEY", "PROG_LOAD", 715 }; 716 static DEFINE_STRARRAY(bpf_cmd, "BPF_"); 717 718 static const char *fsmount_flags[] = { 719 [1] = "CLOEXEC", 720 }; 721 static DEFINE_STRARRAY(fsmount_flags, "FSMOUNT_"); 722 723 #include "trace/beauty/generated/fsconfig_arrays.c" 724 725 static DEFINE_STRARRAY(fsconfig_cmds, "FSCONFIG_"); 726 727 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", }; 728 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1); 729 730 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", }; 731 static DEFINE_STRARRAY(itimers, "ITIMER_"); 732 733 static const char *keyctl_options[] = { 734 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN", 735 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ", 736 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT", 737 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT", 738 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT", 739 }; 740 static DEFINE_STRARRAY(keyctl_options, "KEYCTL_"); 741 742 static const char *whences[] = { "SET", "CUR", "END", 743 #ifdef SEEK_DATA 744 "DATA", 745 #endif 746 #ifdef SEEK_HOLE 747 "HOLE", 748 #endif 749 }; 750 static DEFINE_STRARRAY(whences, "SEEK_"); 751 752 static const char *fcntl_cmds[] = { 753 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK", 754 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "GETLK64", 755 "SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX", 756 "GETOWNER_UIDS", 757 }; 758 static DEFINE_STRARRAY(fcntl_cmds, "F_"); 759 760 static const char *fcntl_linux_specific_cmds[] = { 761 "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC", 762 "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS", 763 "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT", 764 }; 765 766 static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE); 767 768 static struct strarray *fcntl_cmds_arrays[] = { 769 &strarray__fcntl_cmds, 770 &strarray__fcntl_linux_specific_cmds, 771 }; 772 773 static DEFINE_STRARRAYS(fcntl_cmds_arrays); 774 775 static const char *rlimit_resources[] = { 776 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE", 777 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO", 778 "RTTIME", 779 }; 780 static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_"); 781 782 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", }; 783 static DEFINE_STRARRAY(sighow, "SIG_"); 784 785 static const char *clockid[] = { 786 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID", 787 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME", 788 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI" 789 }; 790 static DEFINE_STRARRAY(clockid, "CLOCK_"); 791 792 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size, 793 struct syscall_arg *arg) 794 { 795 bool show_prefix = arg->show_string_prefix; 796 const char *suffix = "_OK"; 797 size_t printed = 0; 798 int mode = arg->val; 799 800 if (mode == F_OK) /* 0 */ 801 return scnprintf(bf, size, "F%s", show_prefix ? suffix : ""); 802 #define P_MODE(n) \ 803 if (mode & n##_OK) { \ 804 printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? 
suffix : ""); \ 805 mode &= ~n##_OK; \ 806 } 807 808 P_MODE(R); 809 P_MODE(W); 810 P_MODE(X); 811 #undef P_MODE 812 813 if (mode) 814 printed += scnprintf(bf + printed, size - printed, "|%#x", mode); 815 816 return printed; 817 } 818 819 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode 820 821 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 822 struct syscall_arg *arg); 823 824 #define SCA_FILENAME syscall_arg__scnprintf_filename 825 826 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 827 struct syscall_arg *arg) 828 { 829 bool show_prefix = arg->show_string_prefix; 830 const char *prefix = "O_"; 831 int printed = 0, flags = arg->val; 832 833 #define P_FLAG(n) \ 834 if (flags & O_##n) { \ 835 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 836 flags &= ~O_##n; \ 837 } 838 839 P_FLAG(CLOEXEC); 840 P_FLAG(NONBLOCK); 841 #undef P_FLAG 842 843 if (flags) 844 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 845 846 return printed; 847 } 848 849 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 850 851 #ifndef GRND_NONBLOCK 852 #define GRND_NONBLOCK 0x0001 853 #endif 854 #ifndef GRND_RANDOM 855 #define GRND_RANDOM 0x0002 856 #endif 857 858 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size, 859 struct syscall_arg *arg) 860 { 861 bool show_prefix = arg->show_string_prefix; 862 const char *prefix = "GRND_"; 863 int printed = 0, flags = arg->val; 864 865 #define P_FLAG(n) \ 866 if (flags & GRND_##n) { \ 867 printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \ 868 flags &= ~GRND_##n; \ 869 } 870 871 P_FLAG(RANDOM); 872 P_FLAG(NONBLOCK); 873 #undef P_FLAG 874 875 if (flags) 876 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 877 878 return printed; 879 } 880 881 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags 882 883 #define STRARRAY(name, array) \ 884 { .scnprintf = SCA_STRARRAY, \ 885 .strtoul = STUL_STRARRAY, \ 886 .parm = &strarray__##array, } 887 888 #define STRARRAY_FLAGS(name, array) \ 889 { .scnprintf = SCA_STRARRAY_FLAGS, \ 890 .strtoul = STUL_STRARRAY_FLAGS, \ 891 .parm = &strarray__##array, } 892 893 #include "trace/beauty/arch_errno_names.c" 894 #include "trace/beauty/eventfd.c" 895 #include "trace/beauty/futex_op.c" 896 #include "trace/beauty/futex_val3.c" 897 #include "trace/beauty/mmap.c" 898 #include "trace/beauty/mode_t.c" 899 #include "trace/beauty/msg_flags.c" 900 #include "trace/beauty/open_flags.c" 901 #include "trace/beauty/perf_event_open.c" 902 #include "trace/beauty/pid.c" 903 #include "trace/beauty/sched_policy.c" 904 #include "trace/beauty/seccomp.c" 905 #include "trace/beauty/signum.c" 906 #include "trace/beauty/socket_type.c" 907 #include "trace/beauty/waitid_options.c" 908 909 static struct syscall_fmt syscall_fmts[] = { 910 { .name = "access", 911 .arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, }, 912 { .name = "arch_prctl", 913 .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ }, 914 [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, }, 915 { .name = "bind", 916 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 917 [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, 918 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 919 { .name = "bpf", 920 .arg = { [0] = STRARRAY(cmd, bpf_cmd), }, }, 921 { .name = "brk", .hexret = true, 922 .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, }, 923 { .name = "clock_gettime", 924 .arg = { [0] = STRARRAY(clk_id, clockid), }, }, 925 { .name = "clone", .errpid = true, .nr_args = 5, 926 .arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, }, 927 [1] = { .name = "child_stack", .scnprintf = SCA_HEX, }, 928 [2] = { .name = "parent_tidptr", .scnprintf = SCA_HEX, }, 929 [3] = { .name = "child_tidptr", .scnprintf = SCA_HEX, }, 930 [4] = { .name = "tls", .scnprintf = SCA_HEX, }, }, }, 931 { .name = "close", 932 .arg = { [0] = { .scnprintf = SCA_CLOSE_FD, /* fd */ }, }, }, 933 { .name = "connect", 934 .arg = { [0] = { .scnprintf = SCA_INT, /* fd */ }, 935 [1] = { .scnprintf = SCA_SOCKADDR, /* servaddr */ }, 936 [2] = { .scnprintf = SCA_INT, /* addrlen */ }, }, }, 937 { .name = "epoll_ctl", 938 .arg = { [1] = STRARRAY(op, epoll_ctl_ops), }, }, 939 { .name = "eventfd2", 940 .arg = { [1] = { .scnprintf = SCA_EFD_FLAGS, /* flags */ }, }, }, 941 { .name = "fchmodat", 942 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 943 { .name = "fchownat", 944 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 945 { .name = "fcntl", 946 .arg = { [1] = { .scnprintf = SCA_FCNTL_CMD, /* cmd */ 947 .strtoul = STUL_STRARRAYS, 948 .parm = &strarrays__fcntl_cmds_arrays, 949 .show_zero = true, }, 950 [2] = { .scnprintf = SCA_FCNTL_ARG, /* arg */ }, }, }, 951 { .name = "flock", 952 .arg = { [1] = { .scnprintf = SCA_FLOCK, /* cmd */ }, }, }, 953 { .name = "fsconfig", 954 .arg = { [1] = STRARRAY(cmd, fsconfig_cmds), }, }, 955 { .name = "fsmount", 956 .arg = { [1] = STRARRAY_FLAGS(flags, fsmount_flags), 957 [2] = { .scnprintf = SCA_FSMOUNT_ATTR_FLAGS, /* attr_flags */ }, }, }, 958 { .name = "fspick", 959 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 960 [1] = { .scnprintf = SCA_FILENAME, /* path */ }, 961 [2] = { .scnprintf = SCA_FSPICK_FLAGS, /* flags */ }, }, }, 962 { .name = 
"fstat", .alias = "newfstat", }, 963 { .name = "fstatat", .alias = "newfstatat", }, 964 { .name = "futex", 965 .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, 966 [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, }, 967 { .name = "futimesat", 968 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 969 { .name = "getitimer", 970 .arg = { [0] = STRARRAY(which, itimers), }, }, 971 { .name = "getpid", .errpid = true, }, 972 { .name = "getpgid", .errpid = true, }, 973 { .name = "getppid", .errpid = true, }, 974 { .name = "getrandom", 975 .arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, }, 976 { .name = "getrlimit", 977 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 978 { .name = "gettid", .errpid = true, }, 979 { .name = "ioctl", 980 .arg = { 981 #if defined(__i386__) || defined(__x86_64__) 982 /* 983 * FIXME: Make this available to all arches. 984 */ 985 [1] = { .scnprintf = SCA_IOCTL_CMD, /* cmd */ }, 986 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 987 #else 988 [2] = { .scnprintf = SCA_HEX, /* arg */ }, }, }, 989 #endif 990 { .name = "kcmp", .nr_args = 5, 991 .arg = { [0] = { .name = "pid1", .scnprintf = SCA_PID, }, 992 [1] = { .name = "pid2", .scnprintf = SCA_PID, }, 993 [2] = { .name = "type", .scnprintf = SCA_KCMP_TYPE, }, 994 [3] = { .name = "idx1", .scnprintf = SCA_KCMP_IDX, }, 995 [4] = { .name = "idx2", .scnprintf = SCA_KCMP_IDX, }, }, }, 996 { .name = "keyctl", 997 .arg = { [0] = STRARRAY(option, keyctl_options), }, }, 998 { .name = "kill", 999 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1000 { .name = "linkat", 1001 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1002 { .name = "lseek", 1003 .arg = { [2] = STRARRAY(whence, whences), }, }, 1004 { .name = "lstat", .alias = "newlstat", }, 1005 { .name = "madvise", 1006 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1007 [2] = { .scnprintf = SCA_MADV_BHV, /* behavior */ }, }, }, 1008 { .name = "mkdirat", 1009 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1010 { .name = "mknodat", 1011 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, }, 1012 { .name = "mmap", .hexret = true, 1013 /* The standard mmap maps to old_mmap on s390x */ 1014 #if defined(__s390x__) 1015 .alias = "old_mmap", 1016 #endif 1017 .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, 1018 [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ 1019 .strtoul = STUL_STRARRAY_FLAGS, 1020 .parm = &strarray__mmap_flags, }, 1021 [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, }, 1022 { .name = "mount", 1023 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ }, 1024 [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */ 1025 .mask_val = SCAMV_MOUNT_FLAGS, /* flags */ }, }, }, 1026 { .name = "move_mount", 1027 .arg = { [0] = { .scnprintf = SCA_FDAT, /* from_dfd */ }, 1028 [1] = { .scnprintf = SCA_FILENAME, /* from_pathname */ }, 1029 [2] = { .scnprintf = SCA_FDAT, /* to_dfd */ }, 1030 [3] = { .scnprintf = SCA_FILENAME, /* to_pathname */ }, 1031 [4] = { .scnprintf = SCA_MOVE_MOUNT_FLAGS, /* flags */ }, }, }, 1032 { .name = "mprotect", 1033 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1034 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, }, }, 1035 { .name = "mq_unlink", 1036 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, }, 1037 { .name = "mremap", .hexret = true, 1038 .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, }, 1039 { .name = "name_to_handle_at", 1040 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1041 { 
.name = "newfstatat", 1042 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1043 { .name = "open", 1044 .arg = { [1] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1045 { .name = "open_by_handle_at", 1046 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1047 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1048 { .name = "openat", 1049 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, 1050 [2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, }, 1051 { .name = "perf_event_open", 1052 .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ }, 1053 [3] = { .scnprintf = SCA_FD, /* group_fd */ }, 1054 [4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, }, 1055 { .name = "pipe2", 1056 .arg = { [1] = { .scnprintf = SCA_PIPE_FLAGS, /* flags */ }, }, }, 1057 { .name = "pkey_alloc", 1058 .arg = { [1] = { .scnprintf = SCA_PKEY_ALLOC_ACCESS_RIGHTS, /* access_rights */ }, }, }, 1059 { .name = "pkey_free", 1060 .arg = { [0] = { .scnprintf = SCA_INT, /* key */ }, }, }, 1061 { .name = "pkey_mprotect", 1062 .arg = { [0] = { .scnprintf = SCA_HEX, /* start */ }, 1063 [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ }, 1064 [3] = { .scnprintf = SCA_INT, /* pkey */ }, }, }, 1065 { .name = "poll", .timeout = true, }, 1066 { .name = "ppoll", .timeout = true, }, 1067 { .name = "prctl", 1068 .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ 1069 .strtoul = STUL_STRARRAY, 1070 .parm = &strarray__prctl_options, }, 1071 [1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ }, 1072 [2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, }, 1073 { .name = "pread", .alias = "pread64", }, 1074 { .name = "preadv", .alias = "pread", }, 1075 { .name = "prlimit64", 1076 .arg = { [1] = STRARRAY(resource, rlimit_resources), }, }, 1077 { .name = "pwrite", .alias = "pwrite64", }, 1078 { .name = "readlinkat", 1079 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1080 { .name = "recvfrom", 1081 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1082 { .name = "recvmmsg", 1083 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1084 { .name = "recvmsg", 1085 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1086 { .name = "renameat", 1087 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1088 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, }, 1089 { .name = "renameat2", 1090 .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ }, 1091 [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, 1092 [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, }, 1093 { .name = "rt_sigaction", 1094 .arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1095 { .name = "rt_sigprocmask", 1096 .arg = { [0] = STRARRAY(how, sighow), }, }, 1097 { .name = "rt_sigqueueinfo", 1098 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1099 { .name = "rt_tgsigqueueinfo", 1100 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1101 { .name = "sched_setscheduler", 1102 .arg = { [1] = { .scnprintf = SCA_SCHED_POLICY, /* policy */ }, }, }, 1103 { .name = "seccomp", 1104 .arg = { [0] = { .scnprintf = SCA_SECCOMP_OP, /* op */ }, 1105 [1] = { .scnprintf = SCA_SECCOMP_FLAGS, /* flags */ }, }, }, 1106 { .name = "select", .timeout = true, }, 1107 { .name = "sendfile", .alias = "sendfile64", }, 1108 { .name = "sendmmsg", 1109 .arg = { [3] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1110 { .name = "sendmsg", 1111 .arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, }, 1112 { .name = "sendto", 1113 .arg = { [3] = { .scnprintf = 
SCA_MSG_FLAGS, /* flags */ }, 1114 [4] = { .scnprintf = SCA_SOCKADDR, /* addr */ }, }, }, 1115 { .name = "set_tid_address", .errpid = true, }, 1116 { .name = "setitimer", 1117 .arg = { [0] = STRARRAY(which, itimers), }, }, 1118 { .name = "setrlimit", 1119 .arg = { [0] = STRARRAY(resource, rlimit_resources), }, }, 1120 { .name = "socket", 1121 .arg = { [0] = STRARRAY(family, socket_families), 1122 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1123 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1124 { .name = "socketpair", 1125 .arg = { [0] = STRARRAY(family, socket_families), 1126 [1] = { .scnprintf = SCA_SK_TYPE, /* type */ }, 1127 [2] = { .scnprintf = SCA_SK_PROTO, /* protocol */ }, }, }, 1128 { .name = "stat", .alias = "newstat", }, 1129 { .name = "statx", 1130 .arg = { [0] = { .scnprintf = SCA_FDAT, /* fdat */ }, 1131 [2] = { .scnprintf = SCA_STATX_FLAGS, /* flags */ } , 1132 [3] = { .scnprintf = SCA_STATX_MASK, /* mask */ }, }, }, 1133 { .name = "swapoff", 1134 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, }, 1135 { .name = "swapon", 1136 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* specialfile */ }, }, }, 1137 { .name = "symlinkat", 1138 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1139 { .name = "sync_file_range", 1140 .arg = { [3] = { .scnprintf = SCA_SYNC_FILE_RANGE_FLAGS, /* flags */ }, }, }, 1141 { .name = "tgkill", 1142 .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1143 { .name = "tkill", 1144 .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, }, 1145 { .name = "umount2", .alias = "umount", 1146 .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, }, 1147 { .name = "uname", .alias = "newuname", }, 1148 { .name = "unlinkat", 1149 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, }, 1150 { .name = "utimensat", 1151 .arg = { [0] = { .scnprintf = SCA_FDAT, /* dirfd */ }, }, }, 1152 { .name = "wait4", .errpid = true, 1153 .arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1154 { .name = "waitid", .errpid = true, 1155 .arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, }, 1156 }; 1157 1158 static int syscall_fmt__cmp(const void *name, const void *fmtp) 1159 { 1160 const struct syscall_fmt *fmt = fmtp; 1161 return strcmp(name, fmt->name); 1162 } 1163 1164 static struct syscall_fmt *__syscall_fmt__find(struct syscall_fmt *fmts, const int nmemb, const char *name) 1165 { 1166 return bsearch(name, fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); 1167 } 1168 1169 static struct syscall_fmt *syscall_fmt__find(const char *name) 1170 { 1171 const int nmemb = ARRAY_SIZE(syscall_fmts); 1172 return __syscall_fmt__find(syscall_fmts, nmemb, name); 1173 } 1174 1175 static struct syscall_fmt *__syscall_fmt__find_by_alias(struct syscall_fmt *fmts, const int nmemb, const char *alias) 1176 { 1177 int i; 1178 1179 for (i = 0; i < nmemb; ++i) { 1180 if (fmts[i].alias && strcmp(fmts[i].alias, alias) == 0) 1181 return &fmts[i]; 1182 } 1183 1184 return NULL; 1185 } 1186 1187 static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias) 1188 { 1189 const int nmemb = ARRAY_SIZE(syscall_fmts); 1190 return __syscall_fmt__find_by_alias(syscall_fmts, nmemb, alias); 1191 } 1192 1193 /* 1194 * is_exit: is this "exit" or "exit_group"? 1195 * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter. 
 * args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
 * nonexistent: Just a hole in the syscall table, syscall id not allocated
 */
struct syscall {
	struct tep_event    *tp_format;
	int		    nr_args;
	int		    args_size;
	struct {
		struct bpf_program *sys_enter,
				   *sys_exit;
	}		    bpf_prog;
	bool		    is_exit;
	bool		    is_open;
	bool		    nonexistent;
	struct tep_format_field *args;
	const char	    *name;
	struct syscall_fmt  *fmt;
	struct syscall_arg_fmt *arg_fmt;
};

/*
 * Must match what is in the BPF program:
 *
 * tools/perf/examples/bpf/augmented_raw_syscalls.c
 */
struct bpf_map_syscall_entry {
	bool	enabled;
	u16	string_args_len[6];
};

/*
 * We need to have this 'calculated' boolean because in some cases we really
 * don't know the duration of a syscall, for instance, when we start a
 * session and some threads are waiting for a syscall to finish, say 'poll',
 * in which case all we can do is to print "( ? )" for the duration and for
 * the start timestamp.
 */
static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
{
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

	if (!calculated)
		printed += fprintf(fp, " ");
	else if (duration >= 1.0)
		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
	else
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
}

/**
 * filename.ptr: The filename char pointer that will be vfs_getname'd
 * filename.entry_str_pos: Where to insert the string translated from
 *                         filename.ptr by the vfs_getname tracepoint/kprobe.
 * ret_scnprintf: syscall args may set this to a different syscall return
 *                formatter, for instance, fcntl may return fds, file flags, etc.
1255 */ 1256 struct thread_trace { 1257 u64 entry_time; 1258 bool entry_pending; 1259 unsigned long nr_events; 1260 unsigned long pfmaj, pfmin; 1261 char *entry_str; 1262 double runtime_ms; 1263 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg); 1264 struct { 1265 unsigned long ptr; 1266 short int entry_str_pos; 1267 bool pending_open; 1268 unsigned int namelen; 1269 char *name; 1270 } filename; 1271 struct { 1272 int max; 1273 struct file *table; 1274 } files; 1275 1276 struct intlist *syscall_stats; 1277 }; 1278 1279 static struct thread_trace *thread_trace__new(void) 1280 { 1281 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace)); 1282 1283 if (ttrace) { 1284 ttrace->files.max = -1; 1285 ttrace->syscall_stats = intlist__new(NULL); 1286 } 1287 1288 return ttrace; 1289 } 1290 1291 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp) 1292 { 1293 struct thread_trace *ttrace; 1294 1295 if (thread == NULL) 1296 goto fail; 1297 1298 if (thread__priv(thread) == NULL) 1299 thread__set_priv(thread, thread_trace__new()); 1300 1301 if (thread__priv(thread) == NULL) 1302 goto fail; 1303 1304 ttrace = thread__priv(thread); 1305 ++ttrace->nr_events; 1306 1307 return ttrace; 1308 fail: 1309 color_fprintf(fp, PERF_COLOR_RED, 1310 "WARNING: not enough memory, dropping samples!\n"); 1311 return NULL; 1312 } 1313 1314 1315 void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg, 1316 size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg)) 1317 { 1318 struct thread_trace *ttrace = thread__priv(arg->thread); 1319 1320 ttrace->ret_scnprintf = ret_scnprintf; 1321 } 1322 1323 #define TRACE_PFMAJ (1 << 0) 1324 #define TRACE_PFMIN (1 << 1) 1325 1326 static const size_t trace__entry_str_size = 2048; 1327 1328 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd) 1329 { 1330 if (fd < 0) 1331 return NULL; 1332 1333 if (fd > ttrace->files.max) { 1334 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file)); 1335 1336 if (nfiles == NULL) 1337 return NULL; 1338 1339 if (ttrace->files.max != -1) { 1340 memset(nfiles + ttrace->files.max + 1, 0, 1341 (fd - ttrace->files.max) * sizeof(struct file)); 1342 } else { 1343 memset(nfiles, 0, (fd + 1) * sizeof(struct file)); 1344 } 1345 1346 ttrace->files.table = nfiles; 1347 ttrace->files.max = fd; 1348 } 1349 1350 return ttrace->files.table + fd; 1351 } 1352 1353 struct file *thread__files_entry(struct thread *thread, int fd) 1354 { 1355 return thread_trace__files_entry(thread__priv(thread), fd); 1356 } 1357 1358 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname) 1359 { 1360 struct thread_trace *ttrace = thread__priv(thread); 1361 struct file *file = thread_trace__files_entry(ttrace, fd); 1362 1363 if (file != NULL) { 1364 struct stat st; 1365 if (stat(pathname, &st) == 0) 1366 file->dev_maj = major(st.st_rdev); 1367 file->pathname = strdup(pathname); 1368 if (file->pathname) 1369 return 0; 1370 } 1371 1372 return -1; 1373 } 1374 1375 static int thread__read_fd_path(struct thread *thread, int fd) 1376 { 1377 char linkname[PATH_MAX], pathname[PATH_MAX]; 1378 struct stat st; 1379 int ret; 1380 1381 if (thread->pid_ == thread->tid) { 1382 scnprintf(linkname, sizeof(linkname), 1383 "/proc/%d/fd/%d", thread->pid_, fd); 1384 } else { 1385 scnprintf(linkname, sizeof(linkname), 1386 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd); 1387 } 1388 1389 if (lstat(linkname, &st) < 0 || st.st_size + 1 > 
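/*
 * The fd -> pathname cache is filled by resolving the /proc symlink for the
 * file descriptor, e.g. (paths illustrative):
 *
 *	/proc/1234/fd/3             when the thread is the group leader
 *	/proc/1234/task/1235/fd/3   for any other thread
 *
 * and the result is stored via trace__set_fd_pathname(), so later references
 * to the same fd can be printed as "3</etc/passwd>" without going back to
 * /proc.
 */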
(off_t)sizeof(pathname)) 1390 return -1; 1391 1392 ret = readlink(linkname, pathname, sizeof(pathname)); 1393 1394 if (ret < 0 || ret > st.st_size) 1395 return -1; 1396 1397 pathname[ret] = '\0'; 1398 return trace__set_fd_pathname(thread, fd, pathname); 1399 } 1400 1401 static const char *thread__fd_path(struct thread *thread, int fd, 1402 struct trace *trace) 1403 { 1404 struct thread_trace *ttrace = thread__priv(thread); 1405 1406 if (ttrace == NULL || trace->fd_path_disabled) 1407 return NULL; 1408 1409 if (fd < 0) 1410 return NULL; 1411 1412 if ((fd > ttrace->files.max || ttrace->files.table[fd].pathname == NULL)) { 1413 if (!trace->live) 1414 return NULL; 1415 ++trace->stats.proc_getname; 1416 if (thread__read_fd_path(thread, fd)) 1417 return NULL; 1418 } 1419 1420 return ttrace->files.table[fd].pathname; 1421 } 1422 1423 size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg) 1424 { 1425 int fd = arg->val; 1426 size_t printed = scnprintf(bf, size, "%d", fd); 1427 const char *path = thread__fd_path(arg->thread, fd, arg->trace); 1428 1429 if (path) 1430 printed += scnprintf(bf + printed, size - printed, "<%s>", path); 1431 1432 return printed; 1433 } 1434 1435 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) 1436 { 1437 size_t printed = scnprintf(bf, size, "%d", fd); 1438 struct thread *thread = machine__find_thread(trace->host, pid, pid); 1439 1440 if (thread) { 1441 const char *path = thread__fd_path(thread, fd, trace); 1442 1443 if (path) 1444 printed += scnprintf(bf + printed, size - printed, "<%s>", path); 1445 1446 thread__put(thread); 1447 } 1448 1449 return printed; 1450 } 1451 1452 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size, 1453 struct syscall_arg *arg) 1454 { 1455 int fd = arg->val; 1456 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg); 1457 struct thread_trace *ttrace = thread__priv(arg->thread); 1458 1459 if (ttrace && fd >= 0 && fd <= ttrace->files.max) 1460 zfree(&ttrace->files.table[fd].pathname); 1461 1462 return printed; 1463 } 1464 1465 static void thread__set_filename_pos(struct thread *thread, const char *bf, 1466 unsigned long ptr) 1467 { 1468 struct thread_trace *ttrace = thread__priv(thread); 1469 1470 ttrace->filename.ptr = ptr; 1471 ttrace->filename.entry_str_pos = bf - ttrace->entry_str; 1472 } 1473 1474 static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, char *bf, size_t size) 1475 { 1476 struct augmented_arg *augmented_arg = arg->augmented.args; 1477 size_t printed = scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value); 1478 /* 1479 * So that the next arg with a payload can consume its augmented arg, i.e. for rename* syscalls 1480 * we would have two strings, each prefixed by its size. 
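/*
 * Concretely, the augmented payload is laid out as consecutive size-prefixed
 * records, and each string argument consumes exactly one of them (layout
 * sketch illustrative, header fields and padding not shown):
 *
 *	[ augmented_arg hdr | "oldname\0" ][ augmented_arg hdr | "newname\0" ]
 *	  ^ first SCA_FILENAME arg           ^ next SCA_FILENAME arg
 *
 * which is why arg->augmented.args is advanced by
 * sizeof(*augmented_arg) + augmented_arg->size just below.
 */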
1481 */ 1482 int consumed = sizeof(*augmented_arg) + augmented_arg->size; 1483 1484 arg->augmented.args = ((void *)arg->augmented.args) + consumed; 1485 arg->augmented.size -= consumed; 1486 1487 return printed; 1488 } 1489 1490 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size, 1491 struct syscall_arg *arg) 1492 { 1493 unsigned long ptr = arg->val; 1494 1495 if (arg->augmented.args) 1496 return syscall_arg__scnprintf_augmented_string(arg, bf, size); 1497 1498 if (!arg->trace->vfs_getname) 1499 return scnprintf(bf, size, "%#x", ptr); 1500 1501 thread__set_filename_pos(arg->thread, bf, ptr); 1502 return 0; 1503 } 1504 1505 static bool trace__filter_duration(struct trace *trace, double t) 1506 { 1507 return t < (trace->duration_filter * NSEC_PER_MSEC); 1508 } 1509 1510 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1511 { 1512 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; 1513 1514 return fprintf(fp, "%10.3f ", ts); 1515 } 1516 1517 /* 1518 * We're handling tstamp=0 as an undefined tstamp, i.e. like when we are 1519 * using ttrace->entry_time for a thread that receives a sys_exit without 1520 * first having received a sys_enter ("poll" issued before tracing session 1521 * starts, lost sys_enter exit due to ring buffer overflow). 1522 */ 1523 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) 1524 { 1525 if (tstamp > 0) 1526 return __trace__fprintf_tstamp(trace, tstamp, fp); 1527 1528 return fprintf(fp, " ? "); 1529 } 1530 1531 static bool done = false; 1532 static bool interrupted = false; 1533 1534 static void sig_handler(int sig) 1535 { 1536 done = true; 1537 interrupted = sig == SIGINT; 1538 } 1539 1540 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) 1541 { 1542 size_t printed = 0; 1543 1544 if (trace->multiple_threads) { 1545 if (trace->show_comm) 1546 printed += fprintf(fp, "%.14s/", thread__comm_str(thread)); 1547 printed += fprintf(fp, "%d ", thread->tid); 1548 } 1549 1550 return printed; 1551 } 1552 1553 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, 1554 u64 duration, bool duration_calculated, u64 tstamp, FILE *fp) 1555 { 1556 size_t printed = 0; 1557 1558 if (trace->show_tstamp) 1559 printed = trace__fprintf_tstamp(trace, tstamp, fp); 1560 if (trace->show_duration) 1561 printed += fprintf_duration(duration, duration_calculated, fp); 1562 return printed + trace__fprintf_comm_tid(trace, thread, fp); 1563 } 1564 1565 static int trace__process_event(struct trace *trace, struct machine *machine, 1566 union perf_event *event, struct perf_sample *sample) 1567 { 1568 int ret = 0; 1569 1570 switch (event->header.type) { 1571 case PERF_RECORD_LOST: 1572 color_fprintf(trace->output, PERF_COLOR_RED, 1573 "LOST %" PRIu64 " events!\n", event->lost.lost); 1574 ret = machine__process_lost_event(machine, event, sample); 1575 break; 1576 default: 1577 ret = machine__process_event(machine, event, sample); 1578 break; 1579 } 1580 1581 return ret; 1582 } 1583 1584 static int trace__tool_process(struct perf_tool *tool, 1585 union perf_event *event, 1586 struct perf_sample *sample, 1587 struct machine *machine) 1588 { 1589 struct trace *trace = container_of(tool, struct trace, tool); 1590 return trace__process_event(trace, machine, event, sample); 1591 } 1592 1593 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1594 { 1595 struct machine *machine = vmachine; 1596 1597 if 
(machine->kptr_restrict_warned) 1598 return NULL; 1599 1600 if (symbol_conf.kptr_restrict) { 1601 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1602 "Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 1603 "Kernel samples will not be resolved.\n"); 1604 machine->kptr_restrict_warned = true; 1605 return NULL; 1606 } 1607 1608 return machine__resolve_kernel_addr(vmachine, addrp, modp); 1609 } 1610 1611 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) 1612 { 1613 int err = symbol__init(NULL); 1614 1615 if (err) 1616 return err; 1617 1618 trace->host = machine__new_host(); 1619 if (trace->host == NULL) 1620 return -ENOMEM; 1621 1622 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); 1623 if (err < 0) 1624 goto out; 1625 1626 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1627 evlist->core.threads, trace__tool_process, false, 1628 1); 1629 out: 1630 if (err) 1631 symbol__exit(); 1632 1633 return err; 1634 } 1635 1636 static void trace__symbols__exit(struct trace *trace) 1637 { 1638 machine__exit(trace->host); 1639 trace->host = NULL; 1640 1641 symbol__exit(); 1642 } 1643 1644 static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args) 1645 { 1646 int idx; 1647 1648 if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0) 1649 nr_args = sc->fmt->nr_args; 1650 1651 sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt)); 1652 if (sc->arg_fmt == NULL) 1653 return -1; 1654 1655 for (idx = 0; idx < nr_args; ++idx) { 1656 if (sc->fmt) 1657 sc->arg_fmt[idx] = sc->fmt->arg[idx]; 1658 } 1659 1660 sc->nr_args = nr_args; 1661 return 0; 1662 } 1663 1664 static struct syscall_arg_fmt syscall_arg_fmts__by_name[] = { 1665 { .name = "msr", .scnprintf = SCA_X86_MSR, .strtoul = STUL_X86_MSR, }, 1666 { .name = "vector", .scnprintf = SCA_X86_IRQ_VECTORS, .strtoul = STUL_X86_IRQ_VECTORS, }, 1667 }; 1668 1669 static int syscall_arg_fmt__cmp(const void *name, const void *fmtp) 1670 { 1671 const struct syscall_arg_fmt *fmt = fmtp; 1672 return strcmp(name, fmt->name); 1673 } 1674 1675 static struct syscall_arg_fmt * 1676 __syscall_arg_fmt__find_by_name(struct syscall_arg_fmt *fmts, const int nmemb, const char *name) 1677 { 1678 return bsearch(name, fmts, nmemb, sizeof(struct syscall_arg_fmt), syscall_arg_fmt__cmp); 1679 } 1680 1681 static struct syscall_arg_fmt *syscall_arg_fmt__find_by_name(const char *name) 1682 { 1683 const int nmemb = ARRAY_SIZE(syscall_arg_fmts__by_name); 1684 return __syscall_arg_fmt__find_by_name(syscall_arg_fmts__by_name, nmemb, name); 1685 } 1686 1687 static struct tep_format_field * 1688 syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field *field) 1689 { 1690 struct tep_format_field *last_field = NULL; 1691 int len; 1692 1693 for (; field; field = field->next, ++arg) { 1694 last_field = field; 1695 1696 if (arg->scnprintf) 1697 continue; 1698 1699 len = strlen(field->name); 1700 1701 if (strcmp(field->type, "const char *") == 0 && 1702 ((len >= 4 && strcmp(field->name + len - 4, "name") == 0) || 1703 strstr(field->name, "path") != NULL)) 1704 arg->scnprintf = SCA_FILENAME; 1705 else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr")) 1706 arg->scnprintf = SCA_PTR; 1707 else if (strcmp(field->type, "pid_t") == 0) 1708 arg->scnprintf = SCA_PID; 1709 else if (strcmp(field->type, "umode_t") == 0) 1710 arg->scnprintf = SCA_MODE_T; 1711 else if ((field->flags & TEP_FIELD_IS_ARRAY) && 
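/*
 * In other words, when a tracepoint argument has no hand-written
 * syscall_arg_fmt, the surrounding loop guesses a formatter from the field's
 * type and name.  A few hypothetical fields and the formatter they would be
 * assigned:
 *
 *	const char *filename   -> SCA_FILENAME  (name ends in "name" or has "path")
 *	void *addr             -> SCA_PTR       (pointer type or "addr" in the name)
 *	pid_t pid              -> SCA_PID
 *	unsigned int fd        -> SCA_FD        (integer type, name ends in "fd")
 */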
strstr(field->type, "char")) { 1712 arg->scnprintf = SCA_CHAR_ARRAY; 1713 arg->nr_entries = field->arraylen; 1714 } else if ((strcmp(field->type, "int") == 0 || 1715 strcmp(field->type, "unsigned int") == 0 || 1716 strcmp(field->type, "long") == 0) && 1717 len >= 2 && strcmp(field->name + len - 2, "fd") == 0) { 1718 /* 1719 * /sys/kernel/tracing/events/syscalls/sys_enter* 1720 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c 1721 * 65 int 1722 * 23 unsigned int 1723 * 7 unsigned long 1724 */ 1725 arg->scnprintf = SCA_FD; 1726 } else { 1727 struct syscall_arg_fmt *fmt = syscall_arg_fmt__find_by_name(field->name); 1728 1729 if (fmt) { 1730 arg->scnprintf = fmt->scnprintf; 1731 arg->strtoul = fmt->strtoul; 1732 } 1733 } 1734 } 1735 1736 return last_field; 1737 } 1738 1739 static int syscall__set_arg_fmts(struct syscall *sc) 1740 { 1741 struct tep_format_field *last_field = syscall_arg_fmt__init_array(sc->arg_fmt, sc->args); 1742 1743 if (last_field) 1744 sc->args_size = last_field->offset + last_field->size; 1745 1746 return 0; 1747 } 1748 1749 static int trace__read_syscall_info(struct trace *trace, int id) 1750 { 1751 char tp_name[128]; 1752 struct syscall *sc; 1753 const char *name = syscalltbl__name(trace->sctbl, id); 1754 1755 if (trace->syscalls.table == NULL) { 1756 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); 1757 if (trace->syscalls.table == NULL) 1758 return -ENOMEM; 1759 } 1760 1761 sc = trace->syscalls.table + id; 1762 if (sc->nonexistent) 1763 return 0; 1764 1765 if (name == NULL) { 1766 sc->nonexistent = true; 1767 return 0; 1768 } 1769 1770 sc->name = name; 1771 sc->fmt = syscall_fmt__find(sc->name); 1772 1773 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); 1774 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1775 1776 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) { 1777 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); 1778 sc->tp_format = trace_event__tp_format("syscalls", tp_name); 1779 } 1780 1781 if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields)) 1782 return -ENOMEM; 1783 1784 if (IS_ERR(sc->tp_format)) 1785 return PTR_ERR(sc->tp_format); 1786 1787 sc->args = sc->tp_format->format.fields; 1788 /* 1789 * We need to check and discard the first variable '__syscall_nr' 1790 * or 'nr' that mean the syscall number. It is needless here. 1791 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels. 
1792 */ 1793 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) { 1794 sc->args = sc->args->next; 1795 --sc->nr_args; 1796 } 1797 1798 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit"); 1799 sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat"); 1800 1801 return syscall__set_arg_fmts(sc); 1802 } 1803 1804 static int perf_evsel__init_tp_arg_scnprintf(struct evsel *evsel) 1805 { 1806 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 1807 1808 if (fmt != NULL) { 1809 syscall_arg_fmt__init_array(fmt, evsel->tp_format->format.fields); 1810 return 0; 1811 } 1812 1813 return -ENOMEM; 1814 } 1815 1816 static int intcmp(const void *a, const void *b) 1817 { 1818 const int *one = a, *another = b; 1819 1820 return *one - *another; 1821 } 1822 1823 static int trace__validate_ev_qualifier(struct trace *trace) 1824 { 1825 int err = 0; 1826 bool printed_invalid_prefix = false; 1827 struct str_node *pos; 1828 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); 1829 1830 trace->ev_qualifier_ids.entries = malloc(nr_allocated * 1831 sizeof(trace->ev_qualifier_ids.entries[0])); 1832 1833 if (trace->ev_qualifier_ids.entries == NULL) { 1834 fputs("Error:\tNot enough memory for allocating events qualifier ids\n", 1835 trace->output); 1836 err = -EINVAL; 1837 goto out; 1838 } 1839 1840 strlist__for_each_entry(pos, trace->ev_qualifier) { 1841 const char *sc = pos->s; 1842 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; 1843 1844 if (id < 0) { 1845 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); 1846 if (id >= 0) 1847 goto matches; 1848 1849 if (!printed_invalid_prefix) { 1850 pr_debug("Skipping unknown syscalls: "); 1851 printed_invalid_prefix = true; 1852 } else { 1853 pr_debug(", "); 1854 } 1855 1856 pr_debug("%s", sc); 1857 continue; 1858 } 1859 matches: 1860 trace->ev_qualifier_ids.entries[nr_used++] = id; 1861 if (match_next == -1) 1862 continue; 1863 1864 while (1) { 1865 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); 1866 if (id < 0) 1867 break; 1868 if (nr_allocated == nr_used) { 1869 void *entries; 1870 1871 nr_allocated += 8; 1872 entries = realloc(trace->ev_qualifier_ids.entries, 1873 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); 1874 if (entries == NULL) { 1875 err = -ENOMEM; 1876 fputs("\nError:\t Not enough memory for parsing\n", trace->output); 1877 goto out_free; 1878 } 1879 trace->ev_qualifier_ids.entries = entries; 1880 } 1881 trace->ev_qualifier_ids.entries[nr_used++] = id; 1882 } 1883 } 1884 1885 trace->ev_qualifier_ids.nr = nr_used; 1886 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); 1887 out: 1888 if (printed_invalid_prefix) 1889 pr_debug("\n"); 1890 return err; 1891 out_free: 1892 zfree(&trace->ev_qualifier_ids.entries); 1893 trace->ev_qualifier_ids.nr = 0; 1894 goto out; 1895 } 1896 1897 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) 1898 { 1899 bool in_ev_qualifier; 1900 1901 if (trace->ev_qualifier_ids.nr == 0) 1902 return true; 1903 1904 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, 1905 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; 1906 1907 if (in_ev_qualifier) 1908 return !trace->not_ev_qualifier; 1909 1910 return trace->not_ev_qualifier; 1911 } 1912 1913 /* 1914 * args is to be interpreted as a series of longs but we need to handle 1915 * 8-byte unaligned accesses. 
args points to raw_data within the event 1916 * and raw_data is guaranteed to be 8-byte unaligned because it is 1917 * preceded by raw_size which is a u32. So we need to copy args to a temp 1918 * variable to read it. Most notably this avoids extended load instructions 1919 * on unaligned addresses 1920 */ 1921 unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx) 1922 { 1923 unsigned long val; 1924 unsigned char *p = arg->args + sizeof(unsigned long) * idx; 1925 1926 memcpy(&val, p, sizeof(val)); 1927 return val; 1928 } 1929 1930 static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size, 1931 struct syscall_arg *arg) 1932 { 1933 if (sc->arg_fmt && sc->arg_fmt[arg->idx].name) 1934 return scnprintf(bf, size, "%s: ", sc->arg_fmt[arg->idx].name); 1935 1936 return scnprintf(bf, size, "arg%d: ", arg->idx); 1937 } 1938 1939 /* 1940 * Check if the value is in fact zero, i.e. mask whatever needs masking, such 1941 * as mount 'flags' argument that needs ignoring some magic flag, see comment 1942 * in tools/perf/trace/beauty/mount_flags.c 1943 */ 1944 static unsigned long syscall_arg_fmt__mask_val(struct syscall_arg_fmt *fmt, struct syscall_arg *arg, unsigned long val) 1945 { 1946 if (fmt && fmt->mask_val) 1947 return fmt->mask_val(arg, val); 1948 1949 return val; 1950 } 1951 1952 static size_t syscall_arg_fmt__scnprintf_val(struct syscall_arg_fmt *fmt, char *bf, size_t size, 1953 struct syscall_arg *arg, unsigned long val) 1954 { 1955 if (fmt && fmt->scnprintf) { 1956 arg->val = val; 1957 if (fmt->parm) 1958 arg->parm = fmt->parm; 1959 return fmt->scnprintf(bf, size, arg); 1960 } 1961 return scnprintf(bf, size, "%ld", val); 1962 } 1963 1964 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size, 1965 unsigned char *args, void *augmented_args, int augmented_args_size, 1966 struct trace *trace, struct thread *thread) 1967 { 1968 size_t printed = 0; 1969 unsigned long val; 1970 u8 bit = 1; 1971 struct syscall_arg arg = { 1972 .args = args, 1973 .augmented = { 1974 .size = augmented_args_size, 1975 .args = augmented_args, 1976 }, 1977 .idx = 0, 1978 .mask = 0, 1979 .trace = trace, 1980 .thread = thread, 1981 .show_string_prefix = trace->show_string_prefix, 1982 }; 1983 struct thread_trace *ttrace = thread__priv(thread); 1984 1985 /* 1986 * Things like fcntl will set this in its 'cmd' formatter to pick the 1987 * right formatter for the return value (an fd? file flags?), which is 1988 * not needed for syscalls that always return a given type, say an fd. 1989 */ 1990 ttrace->ret_scnprintf = NULL; 1991 1992 if (sc->args != NULL) { 1993 struct tep_format_field *field; 1994 1995 for (field = sc->args; field; 1996 field = field->next, ++arg.idx, bit <<= 1) { 1997 if (arg.mask & bit) 1998 continue; 1999 2000 arg.fmt = &sc->arg_fmt[arg.idx]; 2001 val = syscall_arg__val(&arg, arg.idx); 2002 /* 2003 * Some syscall args need some mask, most don't and 2004 * return val untouched. 2005 */ 2006 val = syscall_arg_fmt__mask_val(&sc->arg_fmt[arg.idx], &arg, val); 2007 2008 /* 2009 * Suppress this argument if its value is zero and 2010 * and we don't have a string associated in an 2011 * strarray for it. 2012 */ 2013 if (val == 0 && 2014 !trace->show_zeros && 2015 !(sc->arg_fmt && 2016 (sc->arg_fmt[arg.idx].show_zero || 2017 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY || 2018 sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAYS) && 2019 sc->arg_fmt[arg.idx].parm)) 2020 continue; 2021 2022 printed += scnprintf(bf + printed, size - printed, "%s", printed ? 
", " : ""); 2023 2024 if (trace->show_arg_names) 2025 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2026 2027 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], 2028 bf + printed, size - printed, &arg, val); 2029 } 2030 } else if (IS_ERR(sc->tp_format)) { 2031 /* 2032 * If we managed to read the tracepoint /format file, then we 2033 * may end up not having any args, like with gettid(), so only 2034 * print the raw args when we didn't manage to read it. 2035 */ 2036 while (arg.idx < sc->nr_args) { 2037 if (arg.mask & bit) 2038 goto next_arg; 2039 val = syscall_arg__val(&arg, arg.idx); 2040 if (printed) 2041 printed += scnprintf(bf + printed, size - printed, ", "); 2042 printed += syscall__scnprintf_name(sc, bf + printed, size - printed, &arg); 2043 printed += syscall_arg_fmt__scnprintf_val(&sc->arg_fmt[arg.idx], bf + printed, size - printed, &arg, val); 2044 next_arg: 2045 ++arg.idx; 2046 bit <<= 1; 2047 } 2048 } 2049 2050 return printed; 2051 } 2052 2053 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel, 2054 union perf_event *event, 2055 struct perf_sample *sample); 2056 2057 static struct syscall *trace__syscall_info(struct trace *trace, 2058 struct evsel *evsel, int id) 2059 { 2060 int err = 0; 2061 2062 if (id < 0) { 2063 2064 /* 2065 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried 2066 * before that, leaving at a higher verbosity level till that is 2067 * explained. Reproduced with plain ftrace with: 2068 * 2069 * echo 1 > /t/events/raw_syscalls/sys_exit/enable 2070 * grep "NR -1 " /t/trace_pipe 2071 * 2072 * After generating some load on the machine. 2073 */ 2074 if (verbose > 1) { 2075 static u64 n; 2076 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n", 2077 id, perf_evsel__name(evsel), ++n); 2078 } 2079 return NULL; 2080 } 2081 2082 err = -EINVAL; 2083 2084 if (id > trace->sctbl->syscalls.max_id) 2085 goto out_cant_read; 2086 2087 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) && 2088 (err = trace__read_syscall_info(trace, id)) != 0) 2089 goto out_cant_read; 2090 2091 if (trace->syscalls.table[id].name == NULL) { 2092 if (trace->syscalls.table[id].nonexistent) 2093 return NULL; 2094 goto out_cant_read; 2095 } 2096 2097 return &trace->syscalls.table[id]; 2098 2099 out_cant_read: 2100 if (verbose > 0) { 2101 char sbuf[STRERR_BUFSIZE]; 2102 fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, sizeof(sbuf))); 2103 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL) 2104 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name); 2105 fputs(" information\n", trace->output); 2106 } 2107 return NULL; 2108 } 2109 2110 struct syscall_stats { 2111 struct stats stats; 2112 u64 nr_failures; 2113 int max_errno; 2114 u32 *errnos; 2115 }; 2116 2117 static void thread__update_stats(struct thread *thread, struct thread_trace *ttrace, 2118 int id, struct perf_sample *sample, long err, bool errno_summary) 2119 { 2120 struct int_node *inode; 2121 struct syscall_stats *stats; 2122 u64 duration = 0; 2123 2124 inode = intlist__findnew(ttrace->syscall_stats, id); 2125 if (inode == NULL) 2126 return; 2127 2128 stats = inode->priv; 2129 if (stats == NULL) { 2130 stats = malloc(sizeof(*stats)); 2131 if (stats == NULL) 2132 return; 2133 2134 stats->nr_failures = 0; 2135 stats->max_errno = 0; 2136 stats->errnos = NULL; 2137 init_stats(&stats->stats); 2138 inode->priv = stats; 2139 
} 2140 2141 if (ttrace->entry_time && sample->time > ttrace->entry_time) 2142 duration = sample->time - ttrace->entry_time; 2143 2144 update_stats(&stats->stats, duration); 2145 2146 if (err < 0) { 2147 ++stats->nr_failures; 2148 2149 if (!errno_summary) 2150 return; 2151 2152 err = -err; 2153 if (err > stats->max_errno) { 2154 u32 *new_errnos = realloc(stats->errnos, err * sizeof(u32)); 2155 2156 if (new_errnos) { 2157 memset(new_errnos + stats->max_errno, 0, (err - stats->max_errno) * sizeof(u32)); 2158 } else { 2159 pr_debug("Not enough memory for errno stats for thread \"%s\"(%d/%d), results will be incomplete\n", 2160 thread__comm_str(thread), thread->pid_, thread->tid); 2161 return; 2162 } 2163 2164 stats->errnos = new_errnos; 2165 stats->max_errno = err; 2166 } 2167 2168 ++stats->errnos[err - 1]; 2169 } 2170 } 2171 2172 static int trace__printf_interrupted_entry(struct trace *trace) 2173 { 2174 struct thread_trace *ttrace; 2175 size_t printed; 2176 int len; 2177 2178 if (trace->failure_only || trace->current == NULL) 2179 return 0; 2180 2181 ttrace = thread__priv(trace->current); 2182 2183 if (!ttrace->entry_pending) 2184 return 0; 2185 2186 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 2187 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str); 2188 2189 if (len < trace->args_alignment - 4) 2190 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " "); 2191 2192 printed += fprintf(trace->output, " ...\n"); 2193 2194 ttrace->entry_pending = false; 2195 ++trace->nr_events_printed; 2196 2197 return printed; 2198 } 2199 2200 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, 2201 struct perf_sample *sample, struct thread *thread) 2202 { 2203 int printed = 0; 2204 2205 if (trace->print_sample) { 2206 double ts = (double)sample->time / NSEC_PER_MSEC; 2207 2208 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n", 2209 perf_evsel__name(evsel), ts, 2210 thread__comm_str(thread), 2211 sample->pid, sample->tid, sample->cpu); 2212 } 2213 2214 return printed; 2215 } 2216 2217 static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, int raw_augmented_args_size) 2218 { 2219 void *augmented_args = NULL; 2220 /* 2221 * For now with BPF raw_augmented we hook into raw_syscalls:sys_enter 2222 * and there we get all 6 syscall args plus the tracepoint common fields 2223 * that gets calculated at the start and the syscall_nr (another long). 2224 * So we check if that is the case and if so don't look after the 2225 * sc->args_size but always after the full raw_syscalls:sys_enter payload, 2226 * which is fixed. 2227 * 2228 * We'll revisit this later to pass s->args_size to the BPF augmenter 2229 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c, so that it 2230 * copies only what we need for each syscall, like what happens when we 2231 * use syscalls:sys_enter_NAME, so that we reduce the kernel/userspace 2232 * traffic to just what is needed for each syscall. 
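*
* In both cases the arithmetic below is the same: args_size is the fixed
* raw_augmented payload size when that mode is in use, sc->args_size otherwise,
* and whatever of sample->raw_size is left beyond it is returned as the
* augmented payload (e.g. a filename string copied by the BPF program).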
2233 */ 2234 int args_size = raw_augmented_args_size ?: sc->args_size; 2235 2236 *augmented_args_size = sample->raw_size - args_size; 2237 if (*augmented_args_size > 0) 2238 augmented_args = sample->raw_data + args_size; 2239 2240 return augmented_args; 2241 } 2242 2243 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, 2244 union perf_event *event __maybe_unused, 2245 struct perf_sample *sample) 2246 { 2247 char *msg; 2248 void *args; 2249 int printed = 0; 2250 struct thread *thread; 2251 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2252 int augmented_args_size = 0; 2253 void *augmented_args = NULL; 2254 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2255 struct thread_trace *ttrace; 2256 2257 if (sc == NULL) 2258 return -1; 2259 2260 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2261 ttrace = thread__trace(thread, trace->output); 2262 if (ttrace == NULL) 2263 goto out_put; 2264 2265 trace__fprintf_sample(trace, evsel, sample, thread); 2266 2267 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2268 2269 if (ttrace->entry_str == NULL) { 2270 ttrace->entry_str = malloc(trace__entry_str_size); 2271 if (!ttrace->entry_str) 2272 goto out_put; 2273 } 2274 2275 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) 2276 trace__printf_interrupted_entry(trace); 2277 /* 2278 * If this is raw_syscalls.sys_enter, then it always comes with the 6 possible 2279 * arguments, even if the syscall being handled, say "openat", uses only 4. 2280 * That breaks the syscall__augmented_args() check for augmented args, because 2281 * we calculate syscall->args_size from each syscalls:sys_enter_NAME tracefs format file, 2282 * so when handling, say, the openat syscall we get 6 args from the 2283 * raw_syscalls:sys_enter event where we expected just 4 and would mistakenly 2284 * take the extra 2 u64 args for the augmented filename. So just check 2285 * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
2286 */ 2287 if (evsel != trace->syscalls.events.sys_enter) 2288 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2289 ttrace->entry_time = sample->time; 2290 msg = ttrace->entry_str; 2291 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name); 2292 2293 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed, 2294 args, augmented_args, augmented_args_size, trace, thread); 2295 2296 if (sc->is_exit) { 2297 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) { 2298 int alignment = 0; 2299 2300 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output); 2301 printed = fprintf(trace->output, "%s)", ttrace->entry_str); 2302 if (trace->args_alignment > printed) 2303 alignment = trace->args_alignment - printed; 2304 fprintf(trace->output, "%*s= ?\n", alignment, " "); 2305 } 2306 } else { 2307 ttrace->entry_pending = true; 2308 /* See trace__vfs_getname & trace__sys_exit */ 2309 ttrace->filename.pending_open = false; 2310 } 2311 2312 if (trace->current != thread) { 2313 thread__put(trace->current); 2314 trace->current = thread__get(thread); 2315 } 2316 err = 0; 2317 out_put: 2318 thread__put(thread); 2319 return err; 2320 } 2321 2322 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, 2323 struct perf_sample *sample) 2324 { 2325 struct thread_trace *ttrace; 2326 struct thread *thread; 2327 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1; 2328 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2329 char msg[1024]; 2330 void *args, *augmented_args = NULL; 2331 int augmented_args_size; 2332 2333 if (sc == NULL) 2334 return -1; 2335 2336 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2337 ttrace = thread__trace(thread, trace->output); 2338 /* 2339 * We need to get ttrace just to make sure it is there when syscall__scnprintf_args() 2340 * and the rest of the beautifiers accessing it via struct syscall_arg touches it. 2341 */ 2342 if (ttrace == NULL) 2343 goto out_put; 2344 2345 args = perf_evsel__sc_tp_ptr(evsel, args, sample); 2346 augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); 2347 syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); 2348 fprintf(trace->output, "%s", msg); 2349 err = 0; 2350 out_put: 2351 thread__put(thread); 2352 return err; 2353 } 2354 2355 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, 2356 struct perf_sample *sample, 2357 struct callchain_cursor *cursor) 2358 { 2359 struct addr_location al; 2360 int max_stack = evsel->core.attr.sample_max_stack ? 
2361 evsel->core.attr.sample_max_stack : 2362 trace->max_stack; 2363 int err; 2364 2365 if (machine__resolve(trace->host, &al, sample) < 0) 2366 return -1; 2367 2368 err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack); 2369 addr_location__put(&al); 2370 return err; 2371 } 2372 2373 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) 2374 { 2375 /* TODO: user-configurable print_opts */ 2376 const unsigned int print_opts = EVSEL__PRINT_SYM | 2377 EVSEL__PRINT_DSO | 2378 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2379 2380 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output); 2381 } 2382 2383 static const char *errno_to_name(struct evsel *evsel, int err) 2384 { 2385 struct perf_env *env = perf_evsel__env(evsel); 2386 const char *arch_name = perf_env__arch(env); 2387 2388 return arch_syscalls__strerrno(arch_name, err); 2389 } 2390 2391 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, 2392 union perf_event *event __maybe_unused, 2393 struct perf_sample *sample) 2394 { 2395 long ret; 2396 u64 duration = 0; 2397 bool duration_calculated = false; 2398 struct thread *thread; 2399 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0; 2400 int alignment = trace->args_alignment; 2401 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2402 struct thread_trace *ttrace; 2403 2404 if (sc == NULL) 2405 return -1; 2406 2407 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2408 ttrace = thread__trace(thread, trace->output); 2409 if (ttrace == NULL) 2410 goto out_put; 2411 2412 trace__fprintf_sample(trace, evsel, sample, thread); 2413 2414 ret = perf_evsel__sc_tp_uint(evsel, ret, sample); 2415 2416 if (trace->summary) 2417 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary); 2418 2419 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) { 2420 trace__set_fd_pathname(thread, ret, ttrace->filename.name); 2421 ttrace->filename.pending_open = false; 2422 ++trace->stats.vfs_getname; 2423 } 2424 2425 if (ttrace->entry_time) { 2426 duration = sample->time - ttrace->entry_time; 2427 if (trace__filter_duration(trace, duration)) 2428 goto out; 2429 duration_calculated = true; 2430 } else if (trace->duration_filter) 2431 goto out; 2432 2433 if (sample->callchain) { 2434 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2435 if (callchain_ret == 0) { 2436 if (callchain_cursor.nr < trace->min_stack) 2437 goto out; 2438 callchain_ret = 1; 2439 } 2440 } 2441 2442 if (trace->summary_only || (ret >= 0 && trace->failure_only)) 2443 goto out; 2444 2445 trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output); 2446 2447 if (ttrace->entry_pending) { 2448 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2449 } else { 2450 printed += fprintf(trace->output, " ... 
["); 2451 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2452 printed += 9; 2453 printed += fprintf(trace->output, "]: %s()", sc->name); 2454 } 2455 2456 printed++; /* the closing ')' */ 2457 2458 if (alignment > printed) 2459 alignment -= printed; 2460 else 2461 alignment = 0; 2462 2463 fprintf(trace->output, ")%*s= ", alignment, " "); 2464 2465 if (sc->fmt == NULL) { 2466 if (ret < 0) 2467 goto errno_print; 2468 signed_print: 2469 fprintf(trace->output, "%ld", ret); 2470 } else if (ret < 0) { 2471 errno_print: { 2472 char bf[STRERR_BUFSIZE]; 2473 const char *emsg = str_error_r(-ret, bf, sizeof(bf)), 2474 *e = errno_to_name(evsel, -ret); 2475 2476 fprintf(trace->output, "-1 %s (%s)", e, emsg); 2477 } 2478 } else if (ret == 0 && sc->fmt->timeout) 2479 fprintf(trace->output, "0 (Timeout)"); 2480 else if (ttrace->ret_scnprintf) { 2481 char bf[1024]; 2482 struct syscall_arg arg = { 2483 .val = ret, 2484 .thread = thread, 2485 .trace = trace, 2486 }; 2487 ttrace->ret_scnprintf(bf, sizeof(bf), &arg); 2488 ttrace->ret_scnprintf = NULL; 2489 fprintf(trace->output, "%s", bf); 2490 } else if (sc->fmt->hexret) 2491 fprintf(trace->output, "%#lx", ret); 2492 else if (sc->fmt->errpid) { 2493 struct thread *child = machine__find_thread(trace->host, ret, ret); 2494 2495 if (child != NULL) { 2496 fprintf(trace->output, "%ld", ret); 2497 if (child->comm_set) 2498 fprintf(trace->output, " (%s)", thread__comm_str(child)); 2499 thread__put(child); 2500 } 2501 } else 2502 goto signed_print; 2503 2504 fputc('\n', trace->output); 2505 2506 /* 2507 * We only consider an 'event' for the sake of --max-events a non-filtered 2508 * sys_enter + sys_exit and other tracepoint events. 2509 */ 2510 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX) 2511 interrupted = true; 2512 2513 if (callchain_ret > 0) 2514 trace__fprintf_callchain(trace, sample); 2515 else if (callchain_ret < 0) 2516 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel)); 2517 out: 2518 ttrace->entry_pending = false; 2519 err = 0; 2520 out_put: 2521 thread__put(thread); 2522 return err; 2523 } 2524 2525 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, 2526 union perf_event *event __maybe_unused, 2527 struct perf_sample *sample) 2528 { 2529 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2530 struct thread_trace *ttrace; 2531 size_t filename_len, entry_str_len, to_move; 2532 ssize_t remaining_space; 2533 char *pos; 2534 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname"); 2535 2536 if (!thread) 2537 goto out; 2538 2539 ttrace = thread__priv(thread); 2540 if (!ttrace) 2541 goto out_put; 2542 2543 filename_len = strlen(filename); 2544 if (filename_len == 0) 2545 goto out_put; 2546 2547 if (ttrace->filename.namelen < filename_len) { 2548 char *f = realloc(ttrace->filename.name, filename_len + 1); 2549 2550 if (f == NULL) 2551 goto out_put; 2552 2553 ttrace->filename.namelen = filename_len; 2554 ttrace->filename.name = f; 2555 } 2556 2557 strcpy(ttrace->filename.name, filename); 2558 ttrace->filename.pending_open = true; 2559 2560 if (!ttrace->filename.ptr) 2561 goto out_put; 2562 2563 entry_str_len = strlen(ttrace->entry_str); 2564 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */ 2565 if (remaining_space <= 0) 2566 goto out_put; 2567 2568 if (filename_len > (size_t)remaining_space) { 2569 filename += filename_len - remaining_space; 2570 filename_len = remaining_space; 2571 } 2572 
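/*
 * Splice the pathname we just got from probe:vfs_getname into the pending
 * entry_str, at the spot the filename beautifier recorded in
 * filename.entry_str_pos: shift the already formatted tail (including the
 * terminating NUL) right by filename_len and copy the name into the gap,
 * so the entry ends up printing like "openat(dfd: CWD, filename: /etc/passwd"
 * (illustrative output) instead of a raw pointer value.
 */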
2573 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */ 2574 pos = ttrace->entry_str + ttrace->filename.entry_str_pos; 2575 memmove(pos + filename_len, pos, to_move); 2576 memcpy(pos, filename, filename_len); 2577 2578 ttrace->filename.ptr = 0; 2579 ttrace->filename.entry_str_pos = 0; 2580 out_put: 2581 thread__put(thread); 2582 out: 2583 return 0; 2584 } 2585 2586 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, 2587 union perf_event *event __maybe_unused, 2588 struct perf_sample *sample) 2589 { 2590 u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); 2591 double runtime_ms = (double)runtime / NSEC_PER_MSEC; 2592 struct thread *thread = machine__findnew_thread(trace->host, 2593 sample->pid, 2594 sample->tid); 2595 struct thread_trace *ttrace = thread__trace(thread, trace->output); 2596 2597 if (ttrace == NULL) 2598 goto out_dump; 2599 2600 ttrace->runtime_ms += runtime_ms; 2601 trace->runtime_ms += runtime_ms; 2602 out_put: 2603 thread__put(thread); 2604 return 0; 2605 2606 out_dump: 2607 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n", 2608 evsel->name, 2609 perf_evsel__strval(evsel, sample, "comm"), 2610 (pid_t)perf_evsel__intval(evsel, sample, "pid"), 2611 runtime, 2612 perf_evsel__intval(evsel, sample, "vruntime")); 2613 goto out_put; 2614 } 2615 2616 static int bpf_output__printer(enum binary_printer_ops op, 2617 unsigned int val, void *extra __maybe_unused, FILE *fp) 2618 { 2619 unsigned char ch = (unsigned char)val; 2620 2621 switch (op) { 2622 case BINARY_PRINT_CHAR_DATA: 2623 return fprintf(fp, "%c", isprint(ch) ? ch : '.'); 2624 case BINARY_PRINT_DATA_BEGIN: 2625 case BINARY_PRINT_LINE_BEGIN: 2626 case BINARY_PRINT_ADDR: 2627 case BINARY_PRINT_NUM_DATA: 2628 case BINARY_PRINT_NUM_PAD: 2629 case BINARY_PRINT_SEP: 2630 case BINARY_PRINT_CHAR_PAD: 2631 case BINARY_PRINT_LINE_END: 2632 case BINARY_PRINT_DATA_END: 2633 default: 2634 break; 2635 } 2636 2637 return 0; 2638 } 2639 2640 static void bpf_output__fprintf(struct trace *trace, 2641 struct perf_sample *sample) 2642 { 2643 binary__fprintf(sample->raw_data, sample->raw_size, 8, 2644 bpf_output__printer, NULL, trace->output); 2645 ++trace->nr_events_printed; 2646 } 2647 2648 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample *sample, 2649 struct thread *thread, void *augmented_args, int augmented_args_size) 2650 { 2651 char bf[2048]; 2652 size_t size = sizeof(bf); 2653 struct tep_format_field *field = evsel->tp_format->format.fields; 2654 struct syscall_arg_fmt *arg = __evsel__syscall_arg_fmt(evsel); 2655 size_t printed = 0; 2656 unsigned long val; 2657 u8 bit = 1; 2658 struct syscall_arg syscall_arg = { 2659 .augmented = { 2660 .size = augmented_args_size, 2661 .args = augmented_args, 2662 }, 2663 .idx = 0, 2664 .mask = 0, 2665 .trace = trace, 2666 .thread = thread, 2667 .show_string_prefix = trace->show_string_prefix, 2668 }; 2669 2670 for (; field && arg; field = field->next, ++syscall_arg.idx, bit <<= 1, ++arg) { 2671 if (syscall_arg.mask & bit) 2672 continue; 2673 2674 syscall_arg.len = 0; 2675 syscall_arg.fmt = arg; 2676 if (field->flags & TEP_FIELD_IS_ARRAY) { 2677 int offset = field->offset; 2678 2679 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2680 offset = format_field__intval(field, sample, evsel->needs_swap); 2681 syscall_arg.len = offset >> 16; 2682 offset &= 0xffff; 2683 } 2684 2685 val = (uintptr_t)(sample->raw_data + offset); 2686 } else 2687 val = 
format_field__intval(field, sample, evsel->needs_swap); 2688 /* 2689 * Some syscall args need some mask, most don't and 2690 * return val untouched. 2691 */ 2692 val = syscall_arg_fmt__mask_val(arg, &syscall_arg, val); 2693 2694 /* 2695 * Suppress this argument if its value is zero and 2696 * and we don't have a string associated in an 2697 * strarray for it. 2698 */ 2699 if (val == 0 && 2700 !trace->show_zeros && 2701 !((arg->show_zero || 2702 arg->scnprintf == SCA_STRARRAY || 2703 arg->scnprintf == SCA_STRARRAYS) && 2704 arg->parm)) 2705 continue; 2706 2707 printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : ""); 2708 2709 /* 2710 * XXX Perhaps we should have a show_tp_arg_names, 2711 * leaving show_arg_names just for syscalls? 2712 */ 2713 if (1 || trace->show_arg_names) 2714 printed += scnprintf(bf + printed, size - printed, "%s: ", field->name); 2715 2716 printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); 2717 } 2718 2719 return printed + fprintf(trace->output, "%s", bf); 2720 } 2721 2722 static int trace__event_handler(struct trace *trace, struct evsel *evsel, 2723 union perf_event *event __maybe_unused, 2724 struct perf_sample *sample) 2725 { 2726 struct thread *thread; 2727 int callchain_ret = 0; 2728 /* 2729 * Check if we called perf_evsel__disable(evsel) due to, for instance, 2730 * this event's max_events having been hit and this is an entry coming 2731 * from the ring buffer that we should discard, since the max events 2732 * have already been considered/printed. 2733 */ 2734 if (evsel->disabled) 2735 return 0; 2736 2737 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2738 2739 if (sample->callchain) { 2740 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2741 if (callchain_ret == 0) { 2742 if (callchain_cursor.nr < trace->min_stack) 2743 goto out; 2744 callchain_ret = 1; 2745 } 2746 } 2747 2748 trace__printf_interrupted_entry(trace); 2749 trace__fprintf_tstamp(trace, sample->time, trace->output); 2750 2751 if (trace->trace_syscalls && trace->show_duration) 2752 fprintf(trace->output, "( ): "); 2753 2754 if (thread) 2755 trace__fprintf_comm_tid(trace, thread, trace->output); 2756 2757 if (evsel == trace->syscalls.events.augmented) { 2758 int id = perf_evsel__sc_tp_uint(evsel, id, sample); 2759 struct syscall *sc = trace__syscall_info(trace, evsel, id); 2760 2761 if (sc) { 2762 fprintf(trace->output, "%s(", sc->name); 2763 trace__fprintf_sys_enter(trace, evsel, sample); 2764 fputc(')', trace->output); 2765 goto newline; 2766 } 2767 2768 /* 2769 * XXX: Not having the associated syscall info or not finding/adding 2770 * the thread should never happen, but if it does... 2771 * fall thru and print it as a bpf_output event. 
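* (In that case perf_evsel__is_bpf_output() matches below and
*  bpf_output__fprintf() simply dumps sample->raw_data, printable
*  characters as-is and '.' for everything else.)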
2772 */ 2773 } 2774 2775 fprintf(trace->output, "%s(", evsel->name); 2776 2777 if (perf_evsel__is_bpf_output(evsel)) { 2778 bpf_output__fprintf(trace, sample); 2779 } else if (evsel->tp_format) { 2780 if (strncmp(evsel->tp_format->name, "sys_enter_", 10) || 2781 trace__fprintf_sys_enter(trace, evsel, sample)) { 2782 if (trace->libtraceevent_print) { 2783 event_format__fprintf(evsel->tp_format, sample->cpu, 2784 sample->raw_data, sample->raw_size, 2785 trace->output); 2786 } else { 2787 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0); 2788 } 2789 } 2790 } 2791 2792 newline: 2793 fprintf(trace->output, ")\n"); 2794 2795 if (callchain_ret > 0) 2796 trace__fprintf_callchain(trace, sample); 2797 else if (callchain_ret < 0) 2798 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel)); 2799 2800 ++trace->nr_events_printed; 2801 2802 if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) { 2803 evsel__disable(evsel); 2804 evsel__close(evsel); 2805 } 2806 out: 2807 thread__put(thread); 2808 return 0; 2809 } 2810 2811 static void print_location(FILE *f, struct perf_sample *sample, 2812 struct addr_location *al, 2813 bool print_dso, bool print_sym) 2814 { 2815 2816 if ((verbose > 0 || print_dso) && al->map) 2817 fprintf(f, "%s@", al->map->dso->long_name); 2818 2819 if ((verbose > 0 || print_sym) && al->sym) 2820 fprintf(f, "%s+0x%" PRIx64, al->sym->name, 2821 al->addr - al->sym->start); 2822 else if (al->map) 2823 fprintf(f, "0x%" PRIx64, al->addr); 2824 else 2825 fprintf(f, "0x%" PRIx64, sample->addr); 2826 } 2827 2828 static int trace__pgfault(struct trace *trace, 2829 struct evsel *evsel, 2830 union perf_event *event __maybe_unused, 2831 struct perf_sample *sample) 2832 { 2833 struct thread *thread; 2834 struct addr_location al; 2835 char map_type = 'd'; 2836 struct thread_trace *ttrace; 2837 int err = -1; 2838 int callchain_ret = 0; 2839 2840 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2841 2842 if (sample->callchain) { 2843 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor); 2844 if (callchain_ret == 0) { 2845 if (callchain_cursor.nr < trace->min_stack) 2846 goto out_put; 2847 callchain_ret = 1; 2848 } 2849 } 2850 2851 ttrace = thread__trace(thread, trace->output); 2852 if (ttrace == NULL) 2853 goto out_put; 2854 2855 if (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ) 2856 ttrace->pfmaj++; 2857 else 2858 ttrace->pfmin++; 2859 2860 if (trace->summary_only) 2861 goto out; 2862 2863 thread__find_symbol(thread, sample->cpumode, sample->ip, &al); 2864 2865 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output); 2866 2867 fprintf(trace->output, "%sfault [", 2868 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ? 
2869 "maj" : "min"); 2870 2871 print_location(trace->output, sample, &al, false, true); 2872 2873 fprintf(trace->output, "] => "); 2874 2875 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2876 2877 if (!al.map) { 2878 thread__find_symbol(thread, sample->cpumode, sample->addr, &al); 2879 2880 if (al.map) 2881 map_type = 'x'; 2882 else 2883 map_type = '?'; 2884 } 2885 2886 print_location(trace->output, sample, &al, true, false); 2887 2888 fprintf(trace->output, " (%c%c)\n", map_type, al.level); 2889 2890 if (callchain_ret > 0) 2891 trace__fprintf_callchain(trace, sample); 2892 else if (callchain_ret < 0) 2893 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel)); 2894 2895 ++trace->nr_events_printed; 2896 out: 2897 err = 0; 2898 out_put: 2899 thread__put(thread); 2900 return err; 2901 } 2902 2903 static void trace__set_base_time(struct trace *trace, 2904 struct evsel *evsel, 2905 struct perf_sample *sample) 2906 { 2907 /* 2908 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust 2909 * and don't use sample->time unconditionally, we may end up having 2910 * some other event in the future without PERF_SAMPLE_TIME for good 2911 * reason, i.e. we may not be interested in its timestamps, just in 2912 * it taking place, picking some piece of information when it 2913 * appears in our event stream (vfs_getname comes to mind). 2914 */ 2915 if (trace->base_time == 0 && !trace->full_time && 2916 (evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 2917 trace->base_time = sample->time; 2918 } 2919 2920 static int trace__process_sample(struct perf_tool *tool, 2921 union perf_event *event, 2922 struct perf_sample *sample, 2923 struct evsel *evsel, 2924 struct machine *machine __maybe_unused) 2925 { 2926 struct trace *trace = container_of(tool, struct trace, tool); 2927 struct thread *thread; 2928 int err = 0; 2929 2930 tracepoint_handler handler = evsel->handler; 2931 2932 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); 2933 if (thread && thread__is_filtered(thread)) 2934 goto out; 2935 2936 trace__set_base_time(trace, evsel, sample); 2937 2938 if (handler) { 2939 ++trace->nr_events; 2940 handler(trace, evsel, event, sample); 2941 } 2942 out: 2943 thread__put(thread); 2944 return err; 2945 } 2946 2947 static int trace__record(struct trace *trace, int argc, const char **argv) 2948 { 2949 unsigned int rec_argc, i, j; 2950 const char **rec_argv; 2951 const char * const record_args[] = { 2952 "record", 2953 "-R", 2954 "-m", "1024", 2955 "-c", "1", 2956 }; 2957 pid_t pid = getpid(); 2958 char *filter = asprintf__tp_filter_pids(1, &pid); 2959 const char * const sc_args[] = { "-e", }; 2960 unsigned int sc_args_nr = ARRAY_SIZE(sc_args); 2961 const char * const majpf_args[] = { "-e", "major-faults" }; 2962 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args); 2963 const char * const minpf_args[] = { "-e", "minor-faults" }; 2964 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args); 2965 int err = -1; 2966 2967 /* +3 is for the event string below and the pid filter */ 2968 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 3 + 2969 majpf_args_nr + minpf_args_nr + argc; 2970 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 2971 2972 if (rec_argv == NULL || filter == NULL) 2973 goto out_free; 2974 2975 j = 0; 2976 for (i = 0; i < ARRAY_SIZE(record_args); i++) 2977 rec_argv[j++] = record_args[i]; 2978 2979 if (trace->trace_syscalls) { 2980 for (i = 0; i < sc_args_nr; i++) 2981 rec_argv[j++] = sc_args[i]; 2982 2983 /* event string may be 
different for older kernels - e.g., RHEL6 */ 2984 if (is_valid_tracepoint("raw_syscalls:sys_enter")) 2985 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit"; 2986 else if (is_valid_tracepoint("syscalls:sys_enter")) 2987 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit"; 2988 else { 2989 pr_err("Neither raw_syscalls nor syscalls events exist.\n"); 2990 goto out_free; 2991 } 2992 } 2993 2994 rec_argv[j++] = "--filter"; 2995 rec_argv[j++] = filter; 2996 2997 if (trace->trace_pgfaults & TRACE_PFMAJ) 2998 for (i = 0; i < majpf_args_nr; i++) 2999 rec_argv[j++] = majpf_args[i]; 3000 3001 if (trace->trace_pgfaults & TRACE_PFMIN) 3002 for (i = 0; i < minpf_args_nr; i++) 3003 rec_argv[j++] = minpf_args[i]; 3004 3005 for (i = 0; i < (unsigned int)argc; i++) 3006 rec_argv[j++] = argv[i]; 3007 3008 err = cmd_record(j, rec_argv); 3009 out_free: 3010 free(filter); 3011 free(rec_argv); 3012 return err; 3013 } 3014 3015 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); 3016 3017 static bool evlist__add_vfs_getname(struct evlist *evlist) 3018 { 3019 bool found = false; 3020 struct evsel *evsel, *tmp; 3021 struct parse_events_error err; 3022 int ret; 3023 3024 bzero(&err, sizeof(err)); 3025 ret = parse_events(evlist, "probe:vfs_getname*", &err); 3026 if (ret) { 3027 free(err.str); 3028 free(err.help); 3029 free(err.first_str); 3030 free(err.first_help); 3031 return false; 3032 } 3033 3034 evlist__for_each_entry_safe(evlist, evsel, tmp) { 3035 if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname")) 3036 continue; 3037 3038 if (perf_evsel__field(evsel, "pathname")) { 3039 evsel->handler = trace__vfs_getname; 3040 found = true; 3041 continue; 3042 } 3043 3044 list_del_init(&evsel->core.node); 3045 evsel->evlist = NULL; 3046 evsel__delete(evsel); 3047 } 3048 3049 return found; 3050 } 3051 3052 static struct evsel *perf_evsel__new_pgfault(u64 config) 3053 { 3054 struct evsel *evsel; 3055 struct perf_event_attr attr = { 3056 .type = PERF_TYPE_SOFTWARE, 3057 .mmap_data = 1, 3058 }; 3059 3060 attr.config = config; 3061 attr.sample_period = 1; 3062 3063 event_attr_init(&attr); 3064 3065 evsel = evsel__new(&attr); 3066 if (evsel) 3067 evsel->handler = trace__pgfault; 3068 3069 return evsel; 3070 } 3071 3072 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample) 3073 { 3074 const u32 type = event->header.type; 3075 struct evsel *evsel; 3076 3077 if (type != PERF_RECORD_SAMPLE) { 3078 trace__process_event(trace, trace->host, event, sample); 3079 return; 3080 } 3081 3082 evsel = perf_evlist__id2evsel(trace->evlist, sample->id); 3083 if (evsel == NULL) { 3084 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id); 3085 return; 3086 } 3087 3088 if (evswitch__discard(&trace->evswitch, evsel)) 3089 return; 3090 3091 trace__set_base_time(trace, evsel, sample); 3092 3093 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 3094 sample->raw_data == NULL) { 3095 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n", 3096 perf_evsel__name(evsel), sample->tid, 3097 sample->cpu, sample->raw_size); 3098 } else { 3099 tracepoint_handler handler = evsel->handler; 3100 handler(trace, evsel, event, sample); 3101 } 3102 3103 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX) 3104 interrupted = true; 3105 } 3106 3107 static int trace__add_syscall_newtp(struct trace *trace) 3108 { 3109 int ret = -1; 3110 struct evlist *evlist = 
trace->evlist; 3111 struct evsel *sys_enter, *sys_exit; 3112 3113 sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter); 3114 if (sys_enter == NULL) 3115 goto out; 3116 3117 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args)) 3118 goto out_delete_sys_enter; 3119 3120 sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit); 3121 if (sys_exit == NULL) 3122 goto out_delete_sys_enter; 3123 3124 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret)) 3125 goto out_delete_sys_exit; 3126 3127 perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param); 3128 perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param); 3129 3130 evlist__add(evlist, sys_enter); 3131 evlist__add(evlist, sys_exit); 3132 3133 if (callchain_param.enabled && !trace->kernel_syscallchains) { 3134 /* 3135 * We're interested only in the user space callchain 3136 * leading to the syscall, allow overriding that for 3137 * debugging reasons using --kernel_syscall_callchains 3138 */ 3139 sys_exit->core.attr.exclude_callchain_kernel = 1; 3140 } 3141 3142 trace->syscalls.events.sys_enter = sys_enter; 3143 trace->syscalls.events.sys_exit = sys_exit; 3144 3145 ret = 0; 3146 out: 3147 return ret; 3148 3149 out_delete_sys_exit: 3150 evsel__delete_priv(sys_exit); 3151 out_delete_sys_enter: 3152 evsel__delete_priv(sys_enter); 3153 goto out; 3154 } 3155 3156 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) 3157 { 3158 int err = -1; 3159 struct evsel *sys_exit; 3160 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier, 3161 trace->ev_qualifier_ids.nr, 3162 trace->ev_qualifier_ids.entries); 3163 3164 if (filter == NULL) 3165 goto out_enomem; 3166 3167 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter, 3168 filter)) { 3169 sys_exit = trace->syscalls.events.sys_exit; 3170 err = perf_evsel__append_tp_filter(sys_exit, filter); 3171 } 3172 3173 free(filter); 3174 out: 3175 return err; 3176 out_enomem: 3177 errno = ENOMEM; 3178 goto out; 3179 } 3180 3181 #ifdef HAVE_LIBBPF_SUPPORT 3182 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) 3183 { 3184 if (trace->bpf_obj == NULL) 3185 return NULL; 3186 3187 return bpf_object__find_program_by_title(trace->bpf_obj, name); 3188 } 3189 3190 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, 3191 const char *prog_name, const char *type) 3192 { 3193 struct bpf_program *prog; 3194 3195 if (prog_name == NULL) { 3196 char default_prog_name[256]; 3197 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->name); 3198 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3199 if (prog != NULL) 3200 goto out_found; 3201 if (sc->fmt && sc->fmt->alias) { 3202 scnprintf(default_prog_name, sizeof(default_prog_name), "!syscalls:sys_%s_%s", type, sc->fmt->alias); 3203 prog = trace__find_bpf_program_by_title(trace, default_prog_name); 3204 if (prog != NULL) 3205 goto out_found; 3206 } 3207 goto out_unaugmented; 3208 } 3209 3210 prog = trace__find_bpf_program_by_title(trace, prog_name); 3211 3212 if (prog != NULL) { 3213 out_found: 3214 return prog; 3215 } 3216 3217 pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n", 3218 prog_name, type, sc->name); 3219 out_unaugmented: 3220 return trace->syscalls.unaugmented_prog; 3221 } 3222 3223 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) 3224 { 3225 struct 
syscall *sc = trace__syscall_info(trace, NULL, id); 3226 3227 if (sc == NULL) 3228 return; 3229 3230 sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3231 sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.sys_exit : NULL, "exit"); 3232 } 3233 3234 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) 3235 { 3236 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3237 return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3238 } 3239 3240 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) 3241 { 3242 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3243 return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog); 3244 } 3245 3246 static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry) 3247 { 3248 struct syscall *sc = trace__syscall_info(trace, NULL, id); 3249 int arg = 0; 3250 3251 if (sc == NULL) 3252 goto out; 3253 3254 for (; arg < sc->nr_args; ++arg) { 3255 entry->string_args_len[arg] = 0; 3256 if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) { 3257 /* Should be set like strace -s strsize */ 3258 entry->string_args_len[arg] = PATH_MAX; 3259 } 3260 } 3261 out: 3262 for (; arg < 6; ++arg) 3263 entry->string_args_len[arg] = 0; 3264 } 3265 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace) 3266 { 3267 int fd = bpf_map__fd(trace->syscalls.map); 3268 struct bpf_map_syscall_entry value = { 3269 .enabled = !trace->not_ev_qualifier, 3270 }; 3271 int err = 0; 3272 size_t i; 3273 3274 for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) { 3275 int key = trace->ev_qualifier_ids.entries[i]; 3276 3277 if (value.enabled) { 3278 trace__init_bpf_map_syscall_args(trace, key, &value); 3279 trace__init_syscall_bpf_progs(trace, key); 3280 } 3281 3282 err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); 3283 if (err) 3284 break; 3285 } 3286 3287 return err; 3288 } 3289 3290 static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled) 3291 { 3292 int fd = bpf_map__fd(trace->syscalls.map); 3293 struct bpf_map_syscall_entry value = { 3294 .enabled = enabled, 3295 }; 3296 int err = 0, key; 3297 3298 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3299 if (enabled) 3300 trace__init_bpf_map_syscall_args(trace, key, &value); 3301 3302 err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); 3303 if (err) 3304 break; 3305 } 3306 3307 return err; 3308 } 3309 3310 static int trace__init_syscalls_bpf_map(struct trace *trace) 3311 { 3312 bool enabled = true; 3313 3314 if (trace->ev_qualifier_ids.nr) 3315 enabled = trace->not_ev_qualifier; 3316 3317 return __trace__init_syscalls_bpf_map(trace, enabled); 3318 } 3319 3320 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc) 3321 { 3322 struct tep_format_field *field, *candidate_field; 3323 int id; 3324 3325 /* 3326 * We're only interested in syscalls that have a pointer: 3327 */ 3328 for (field = sc->args; field; field = field->next) { 3329 if (field->flags & TEP_FIELD_IS_POINTER) 3330 goto try_to_find_pair; 3331 } 3332 3333 return NULL; 3334 3335 try_to_find_pair: 3336 for (id = 0; id < trace->sctbl->syscalls.nr_entries; ++id) { 3337 struct syscall *pair = trace__syscall_info(trace, NULL, id); 3338 struct bpf_program *pair_prog; 3339 bool is_candidate = false; 3340 
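/*
 * Skip ourselves, non-existent syscalls and pairs that only got the
 * catch-all unaugmented program, then compare the two signatures: a
 * candidate is rejected if it collects a pointer where we don't have one,
 * or if a pointer arg both have differs in type; a pointer of ours that
 * the candidate doesn't copy is tolerated, that arg just won't be augmented.
 */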
3341 if (pair == NULL || pair == sc || 3342 pair->bpf_prog.sys_enter == trace->syscalls.unaugmented_prog) 3343 continue; 3344 3345 for (field = sc->args, candidate_field = pair->args; 3346 field && candidate_field; field = field->next, candidate_field = candidate_field->next) { 3347 bool is_pointer = field->flags & TEP_FIELD_IS_POINTER, 3348 candidate_is_pointer = candidate_field->flags & TEP_FIELD_IS_POINTER; 3349 3350 if (is_pointer) { 3351 if (!candidate_is_pointer) { 3352 // The candidate just doesn't copies our pointer arg, might copy other pointers we want. 3353 continue; 3354 } 3355 } else { 3356 if (candidate_is_pointer) { 3357 // The candidate might copy a pointer we don't have, skip it. 3358 goto next_candidate; 3359 } 3360 continue; 3361 } 3362 3363 if (strcmp(field->type, candidate_field->type)) 3364 goto next_candidate; 3365 3366 is_candidate = true; 3367 } 3368 3369 if (!is_candidate) 3370 goto next_candidate; 3371 3372 /* 3373 * Check if the tentative pair syscall augmenter has more pointers, if it has, 3374 * then it may be collecting that and we then can't use it, as it would collect 3375 * more than what is common to the two syscalls. 3376 */ 3377 if (candidate_field) { 3378 for (candidate_field = candidate_field->next; candidate_field; candidate_field = candidate_field->next) 3379 if (candidate_field->flags & TEP_FIELD_IS_POINTER) 3380 goto next_candidate; 3381 } 3382 3383 pair_prog = pair->bpf_prog.sys_enter; 3384 /* 3385 * If the pair isn't enabled, then its bpf_prog.sys_enter will not 3386 * have been searched for, so search it here and if it returns the 3387 * unaugmented one, then ignore it, otherwise we'll reuse that BPF 3388 * program for a filtered syscall on a non-filtered one. 3389 * 3390 * For instance, we have "!syscalls:sys_enter_renameat" and that is 3391 * useful for "renameat2". 3392 */ 3393 if (pair_prog == NULL) { 3394 pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter"); 3395 if (pair_prog == trace->syscalls.unaugmented_prog) 3396 goto next_candidate; 3397 } 3398 3399 pr_debug("Reusing \"%s\" BPF sys_enter augmenter for \"%s\"\n", pair->name, sc->name); 3400 return pair_prog; 3401 next_candidate: 3402 continue; 3403 } 3404 3405 return NULL; 3406 } 3407 3408 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) 3409 { 3410 int map_enter_fd = bpf_map__fd(trace->syscalls.prog_array.sys_enter), 3411 map_exit_fd = bpf_map__fd(trace->syscalls.prog_array.sys_exit); 3412 int err = 0, key; 3413 3414 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3415 int prog_fd; 3416 3417 if (!trace__syscall_enabled(trace, key)) 3418 continue; 3419 3420 trace__init_syscall_bpf_progs(trace, key); 3421 3422 // It'll get at least the "!raw_syscalls:unaugmented" 3423 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key); 3424 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3425 if (err) 3426 break; 3427 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key); 3428 err = bpf_map_update_elem(map_exit_fd, &key, &prog_fd, BPF_ANY); 3429 if (err) 3430 break; 3431 } 3432 3433 /* 3434 * Now lets do a second pass looking for enabled syscalls without 3435 * an augmenter that have a signature that is a superset of another 3436 * syscall with an augmenter so that we can auto-reuse it. 3437 * 3438 * I.e. if we have an augmenter for the "open" syscall that has 3439 * this signature: 3440 * 3441 * int open(const char *pathname, int flags, mode_t mode); 3442 * 3443 * I.e. 
that will collect just the first string argument, then we 3444 * can reuse it for the 'creat' syscall, that has this signature: 3445 * 3446 * int creat(const char *pathname, mode_t mode); 3447 * 3448 * and for: 3449 * 3450 * int stat(const char *pathname, struct stat *statbuf); 3451 * int lstat(const char *pathname, struct stat *statbuf); 3452 * 3453 * Because the 'open' augmenter will collect the first arg as a string, 3454 * and leave alone all the other args, which already helps with 3455 * beautifying 'stat' and 'lstat''s pathname arg. 3456 * 3457 * Then, in time, when 'stat' gets an augmenter that collects both 3458 * first and second arg (this one on the raw_syscalls:sys_exit prog 3459 * array tail call, then that one will be used. 3460 */ 3461 for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) { 3462 struct syscall *sc = trace__syscall_info(trace, NULL, key); 3463 struct bpf_program *pair_prog; 3464 int prog_fd; 3465 3466 if (sc == NULL || sc->bpf_prog.sys_enter == NULL) 3467 continue; 3468 3469 /* 3470 * For now we're just reusing the sys_enter prog, and if it 3471 * already has an augmenter, we don't need to find one. 3472 */ 3473 if (sc->bpf_prog.sys_enter != trace->syscalls.unaugmented_prog) 3474 continue; 3475 3476 /* 3477 * Look at all the other syscalls for one that has a signature 3478 * that is close enough that we can share: 3479 */ 3480 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc); 3481 if (pair_prog == NULL) 3482 continue; 3483 3484 sc->bpf_prog.sys_enter = pair_prog; 3485 3486 /* 3487 * Update the BPF_MAP_TYPE_PROG_SHARED for raw_syscalls:sys_enter 3488 * with the fd for the program we're reusing: 3489 */ 3490 prog_fd = bpf_program__fd(sc->bpf_prog.sys_enter); 3491 err = bpf_map_update_elem(map_enter_fd, &key, &prog_fd, BPF_ANY); 3492 if (err) 3493 break; 3494 } 3495 3496 3497 return err; 3498 } 3499 3500 static void trace__delete_augmented_syscalls(struct trace *trace) 3501 { 3502 struct evsel *evsel, *tmp; 3503 3504 evlist__remove(trace->evlist, trace->syscalls.events.augmented); 3505 evsel__delete(trace->syscalls.events.augmented); 3506 trace->syscalls.events.augmented = NULL; 3507 3508 evlist__for_each_entry_safe(trace->evlist, tmp, evsel) { 3509 if (evsel->bpf_obj == trace->bpf_obj) { 3510 evlist__remove(trace->evlist, evsel); 3511 evsel__delete(evsel); 3512 } 3513 3514 } 3515 3516 bpf_object__close(trace->bpf_obj); 3517 trace->bpf_obj = NULL; 3518 } 3519 #else // HAVE_LIBBPF_SUPPORT 3520 static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused) 3521 { 3522 return 0; 3523 } 3524 3525 static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused) 3526 { 3527 return 0; 3528 } 3529 3530 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused, 3531 const char *name __maybe_unused) 3532 { 3533 return NULL; 3534 } 3535 3536 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused) 3537 { 3538 return 0; 3539 } 3540 3541 static void trace__delete_augmented_syscalls(struct trace *trace __maybe_unused) 3542 { 3543 } 3544 #endif // HAVE_LIBBPF_SUPPORT 3545 3546 static bool trace__only_augmented_syscalls_evsels(struct trace *trace) 3547 { 3548 struct evsel *evsel; 3549 3550 evlist__for_each_entry(trace->evlist, evsel) { 3551 if (evsel == trace->syscalls.events.augmented || 3552 evsel->bpf_obj == trace->bpf_obj) 3553 continue; 3554 3555 return false; 3556 } 3557 3558 return true; 3559 } 3560 3561 static int trace__set_ev_qualifier_filter(struct trace 
*trace) 3562 { 3563 if (trace->syscalls.map) 3564 return trace__set_ev_qualifier_bpf_filter(trace); 3565 if (trace->syscalls.events.sys_enter) 3566 return trace__set_ev_qualifier_tp_filter(trace); 3567 return 0; 3568 } 3569 3570 static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, 3571 size_t npids __maybe_unused, pid_t *pids __maybe_unused) 3572 { 3573 int err = 0; 3574 #ifdef HAVE_LIBBPF_SUPPORT 3575 bool value = true; 3576 int map_fd = bpf_map__fd(map); 3577 size_t i; 3578 3579 for (i = 0; i < npids; ++i) { 3580 err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY); 3581 if (err) 3582 break; 3583 } 3584 #endif 3585 return err; 3586 } 3587 3588 static int trace__set_filter_loop_pids(struct trace *trace) 3589 { 3590 unsigned int nr = 1, err; 3591 pid_t pids[32] = { 3592 getpid(), 3593 }; 3594 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]); 3595 3596 while (thread && nr < ARRAY_SIZE(pids)) { 3597 struct thread *parent = machine__find_thread(trace->host, thread->ppid, thread->ppid); 3598 3599 if (parent == NULL) 3600 break; 3601 3602 if (!strcmp(thread__comm_str(parent), "sshd") || 3603 strstarts(thread__comm_str(parent), "gnome-terminal")) { 3604 pids[nr++] = parent->tid; 3605 break; 3606 } 3607 thread = parent; 3608 } 3609 3610 err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids); 3611 if (!err && trace->filter_pids.map) 3612 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); 3613 3614 return err; 3615 } 3616 3617 static int trace__set_filter_pids(struct trace *trace) 3618 { 3619 int err = 0; 3620 /* 3621 * Better not use !target__has_task() here because we need to cover the 3622 * case where no threads were specified in the command line, but a 3623 * workload was, and in that case we will fill in the thread_map when 3624 * we fork the workload in perf_evlist__prepare_workload. 3625 */ 3626 if (trace->filter_pids.nr > 0) { 3627 err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr, 3628 trace->filter_pids.entries); 3629 if (!err && trace->filter_pids.map) { 3630 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, 3631 trace->filter_pids.entries); 3632 } 3633 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) { 3634 err = trace__set_filter_loop_pids(trace); 3635 } 3636 3637 return err; 3638 } 3639 3640 static int __trace__deliver_event(struct trace *trace, union perf_event *event) 3641 { 3642 struct evlist *evlist = trace->evlist; 3643 struct perf_sample sample; 3644 int err; 3645 3646 err = perf_evlist__parse_sample(evlist, event, &sample); 3647 if (err) 3648 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err); 3649 else 3650 trace__handle_event(trace, event, &sample); 3651 3652 return 0; 3653 } 3654 3655 static int __trace__flush_events(struct trace *trace) 3656 { 3657 u64 first = ordered_events__first_time(&trace->oe.data); 3658 u64 flush = trace->oe.last - NSEC_PER_SEC; 3659 3660 /* Is there some thing to flush.. */ 3661 if (first && first < flush) 3662 return ordered_events__flush_time(&trace->oe.data, flush); 3663 3664 return 0; 3665 } 3666 3667 static int trace__flush_events(struct trace *trace) 3668 { 3669 return !trace->sort_events ? 
0 : __trace__flush_events(trace); 3670 } 3671 3672 static int trace__deliver_event(struct trace *trace, union perf_event *event) 3673 { 3674 int err; 3675 3676 if (!trace->sort_events) 3677 return __trace__deliver_event(trace, event); 3678 3679 err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last); 3680 if (err && err != -1) 3681 return err; 3682 3683 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0); 3684 if (err) 3685 return err; 3686 3687 return trace__flush_events(trace); 3688 } 3689 3690 static int ordered_events__deliver_event(struct ordered_events *oe, 3691 struct ordered_event *event) 3692 { 3693 struct trace *trace = container_of(oe, struct trace, oe.data); 3694 3695 return __trace__deliver_event(trace, event->event); 3696 } 3697 3698 static struct syscall_arg_fmt *perf_evsel__syscall_arg_fmt(struct evsel *evsel, char *arg) 3699 { 3700 struct tep_format_field *field; 3701 struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel); 3702 3703 if (evsel->tp_format == NULL || fmt == NULL) 3704 return NULL; 3705 3706 for (field = evsel->tp_format->format.fields; field; field = field->next, ++fmt) 3707 if (strcmp(field->name, arg) == 0) 3708 return fmt; 3709 3710 return NULL; 3711 } 3712 3713 static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel *evsel) 3714 { 3715 char *tok, *left = evsel->filter, *new_filter = evsel->filter; 3716 3717 while ((tok = strpbrk(left, "=<>!")) != NULL) { 3718 char *right = tok + 1, *right_end; 3719 3720 if (*right == '=') 3721 ++right; 3722 3723 while (isspace(*right)) 3724 ++right; 3725 3726 if (*right == '\0') 3727 break; 3728 3729 while (!isalpha(*left)) 3730 if (++left == tok) { 3731 /* 3732 * Bail out, can't find the name of the argument that is being 3733 * used in the filter, let it try to set this filter, will fail later. 
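 *
 * E.g. if everything to the left of the operator is already numeric
 * there is no argument name to resolve, so keep the filter exactly as
 * the user wrote it and let perf_evlist__apply_filters() report the
 * problem, if any.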
3734 */ 3735 return 0; 3736 } 3737 3738 right_end = right + 1; 3739 while (isalnum(*right_end) || *right_end == '_' || *right_end == '|') 3740 ++right_end; 3741 3742 if (isalpha(*right)) { 3743 struct syscall_arg_fmt *fmt; 3744 int left_size = tok - left, 3745 right_size = right_end - right; 3746 char arg[128]; 3747 3748 while (isspace(left[left_size - 1])) 3749 --left_size; 3750 3751 scnprintf(arg, sizeof(arg), "%.*s", left_size, left); 3752 3753 fmt = perf_evsel__syscall_arg_fmt(evsel, arg); 3754 if (fmt == NULL) { 3755 pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n", 3756 arg, evsel->name, evsel->filter); 3757 return -1; 3758 } 3759 3760 pr_debug2("trying to expand \"%s\" \"%.*s\" \"%.*s\" -> ", 3761 arg, (int)(right - tok), tok, right_size, right); 3762 3763 if (fmt->strtoul) { 3764 u64 val; 3765 struct syscall_arg syscall_arg = { 3766 .parm = fmt->parm, 3767 }; 3768 3769 if (fmt->strtoul(right, right_size, &syscall_arg, &val)) { 3770 char *n, expansion[19]; 3771 int expansion_lenght = scnprintf(expansion, sizeof(expansion), "%#" PRIx64, val); 3772 int expansion_offset = right - new_filter; 3773 3774 pr_debug("%s", expansion); 3775 3776 if (asprintf(&n, "%.*s%s%s", expansion_offset, new_filter, expansion, right_end) < 0) { 3777 pr_debug(" out of memory!\n"); 3778 free(new_filter); 3779 return -1; 3780 } 3781 if (new_filter != evsel->filter) 3782 free(new_filter); 3783 left = n + expansion_offset + expansion_lenght; 3784 new_filter = n; 3785 } else { 3786 pr_err("\"%.*s\" not found for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3787 right_size, right, arg, evsel->name, evsel->filter); 3788 return -1; 3789 } 3790 } else { 3791 pr_err("No resolver (strtoul) for \"%s\" in \"%s\", can't set filter \"%s\"\n", 3792 arg, evsel->name, evsel->filter); 3793 return -1; 3794 } 3795 3796 pr_debug("\n"); 3797 } else { 3798 left = right_end; 3799 } 3800 } 3801 3802 if (new_filter != evsel->filter) { 3803 pr_debug("New filter for %s: %s\n", evsel->name, new_filter); 3804 perf_evsel__set_filter(evsel, new_filter); 3805 free(new_filter); 3806 } 3807 3808 return 0; 3809 } 3810 3811 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) 3812 { 3813 struct evlist *evlist = trace->evlist; 3814 struct evsel *evsel; 3815 3816 evlist__for_each_entry(evlist, evsel) { 3817 if (evsel->filter == NULL) 3818 continue; 3819 3820 if (trace__expand_filter(trace, evsel)) { 3821 *err_evsel = evsel; 3822 return -1; 3823 } 3824 } 3825 3826 return 0; 3827 } 3828 3829 static int trace__run(struct trace *trace, int argc, const char **argv) 3830 { 3831 struct evlist *evlist = trace->evlist; 3832 struct evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL; 3833 int err = -1, i; 3834 unsigned long before; 3835 const bool forks = argc > 0; 3836 bool draining = false; 3837 3838 trace->live = true; 3839 3840 if (!trace->raw_augmented_syscalls) { 3841 if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) 3842 goto out_error_raw_syscalls; 3843 3844 if (trace->trace_syscalls) 3845 trace->vfs_getname = evlist__add_vfs_getname(evlist); 3846 } 3847 3848 if ((trace->trace_pgfaults & TRACE_PFMAJ)) { 3849 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ); 3850 if (pgfault_maj == NULL) 3851 goto out_error_mem; 3852 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param); 3853 evlist__add(evlist, pgfault_maj); 3854 } 3855 3856 if ((trace->trace_pgfaults & TRACE_PFMIN)) { 3857 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN); 3858 if 
(pgfault_min == NULL) 3859 goto out_error_mem; 3860 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param); 3861 evlist__add(evlist, pgfault_min); 3862 } 3863 3864 if (trace->sched && 3865 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime", 3866 trace__sched_stat_runtime)) 3867 goto out_error_sched_stat_runtime; 3868 /* 3869 * If a global cgroup was set, apply it to all the events without an 3870 * explicit cgroup. I.e.: 3871 * 3872 * trace -G A -e sched:*switch 3873 * 3874 * Will set all raw_syscalls:sys_{enter,exit}, pgfault, vfs_getname, etc 3875 * _and_ sched:sched_switch to the 'A' cgroup, while: 3876 * 3877 * trace -e sched:*switch -G A 3878 * 3879 * will only set the sched:sched_switch event to the 'A' cgroup, all the 3880 * other events (raw_syscalls:sys_{enter,exit}, etc are left "without" 3881 * a cgroup (on the root cgroup, sys wide, etc). 3882 * 3883 * Multiple cgroups: 3884 * 3885 * trace -G A -e sched:*switch -G B 3886 * 3887 * the syscall ones go to the 'A' cgroup, the sched:sched_switch goes 3888 * to the 'B' cgroup. 3889 * 3890 * evlist__set_default_cgroup() grabs a reference of the passed cgroup 3891 * only for the evsels still without a cgroup, i.e. evsel->cgroup == NULL. 3892 */ 3893 if (trace->cgroup) 3894 evlist__set_default_cgroup(trace->evlist, trace->cgroup); 3895 3896 err = perf_evlist__create_maps(evlist, &trace->opts.target); 3897 if (err < 0) { 3898 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n"); 3899 goto out_delete_evlist; 3900 } 3901 3902 err = trace__symbols_init(trace, evlist); 3903 if (err < 0) { 3904 fprintf(trace->output, "Problems initializing symbol libraries!\n"); 3905 goto out_delete_evlist; 3906 } 3907 3908 perf_evlist__config(evlist, &trace->opts, &callchain_param); 3909 3910 signal(SIGCHLD, sig_handler); 3911 signal(SIGINT, sig_handler); 3912 3913 if (forks) { 3914 err = perf_evlist__prepare_workload(evlist, &trace->opts.target, 3915 argv, false, NULL); 3916 if (err < 0) { 3917 fprintf(trace->output, "Couldn't run the workload!\n"); 3918 goto out_delete_evlist; 3919 } 3920 } 3921 3922 err = evlist__open(evlist); 3923 if (err < 0) 3924 goto out_error_open; 3925 3926 err = bpf__apply_obj_config(); 3927 if (err) { 3928 char errbuf[BUFSIZ]; 3929 3930 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf)); 3931 pr_err("ERROR: Apply config to BPF failed: %s\n", 3932 errbuf); 3933 goto out_error_open; 3934 } 3935 3936 err = trace__set_filter_pids(trace); 3937 if (err < 0) 3938 goto out_error_mem; 3939 3940 if (trace->syscalls.map) 3941 trace__init_syscalls_bpf_map(trace); 3942 3943 if (trace->syscalls.prog_array.sys_enter) 3944 trace__init_syscalls_bpf_prog_array_maps(trace); 3945 3946 if (trace->ev_qualifier_ids.nr > 0) { 3947 err = trace__set_ev_qualifier_filter(trace); 3948 if (err < 0) 3949 goto out_errno; 3950 3951 if (trace->syscalls.events.sys_exit) { 3952 pr_debug("event qualifier tracepoint filter: %s\n", 3953 trace->syscalls.events.sys_exit->filter); 3954 } 3955 } 3956 3957 /* 3958 * If the "close" syscall is not traced, then we will not have the 3959 * opportunity to, in syscall_arg__scnprintf_close_fd() invalidate the 3960 * fd->pathname table and were ending up showing the last value set by 3961 * syscalls opening a pathname and associating it with a descriptor or 3962 * reading it from /proc/pid/fd/ in cases where that doesn't make 3963 * sense. 3964 * 3965 * So just disable this beautifier (SCA_FD, SCA_FDAT) when 'close' is 3966 * not in use. 
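 *
 * I.e. when tracing just, say, 'openat' and 'read', an fd may get
 * closed and its number reused behind our back, and decorating it with
 * a stale pathname would do more harm than good.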
3967 */ 3968 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close")); 3969 3970 err = trace__expand_filters(trace, &evsel); 3971 if (err) 3972 goto out_delete_evlist; 3973 err = perf_evlist__apply_filters(evlist, &evsel); 3974 if (err < 0) 3975 goto out_error_apply_filters; 3976 3977 if (trace->dump.map) 3978 bpf_map__fprintf(trace->dump.map, trace->output); 3979 3980 err = evlist__mmap(evlist, trace->opts.mmap_pages); 3981 if (err < 0) 3982 goto out_error_mmap; 3983 3984 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay) 3985 evlist__enable(evlist); 3986 3987 if (forks) 3988 perf_evlist__start_workload(evlist); 3989 3990 if (trace->opts.initial_delay) { 3991 usleep(trace->opts.initial_delay * 1000); 3992 evlist__enable(evlist); 3993 } 3994 3995 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 3996 evlist->core.threads->nr > 1 || 3997 evlist__first(evlist)->core.attr.inherit; 3998 3999 /* 4000 * Now that we already used evsel->core.attr to ask the kernel to setup the 4001 * events, lets reuse evsel->core.attr.sample_max_stack as the limit in 4002 * trace__resolve_callchain(), allowing per-event max-stack settings 4003 * to override an explicitly set --max-stack global setting. 4004 */ 4005 evlist__for_each_entry(evlist, evsel) { 4006 if (evsel__has_callchain(evsel) && 4007 evsel->core.attr.sample_max_stack == 0) 4008 evsel->core.attr.sample_max_stack = trace->max_stack; 4009 } 4010 again: 4011 before = trace->nr_events; 4012 4013 for (i = 0; i < evlist->core.nr_mmaps; i++) { 4014 union perf_event *event; 4015 struct mmap *md; 4016 4017 md = &evlist->mmap[i]; 4018 if (perf_mmap__read_init(&md->core) < 0) 4019 continue; 4020 4021 while ((event = perf_mmap__read_event(&md->core)) != NULL) { 4022 ++trace->nr_events; 4023 4024 err = trace__deliver_event(trace, event); 4025 if (err) 4026 goto out_disable; 4027 4028 perf_mmap__consume(&md->core); 4029 4030 if (interrupted) 4031 goto out_disable; 4032 4033 if (done && !draining) { 4034 evlist__disable(evlist); 4035 draining = true; 4036 } 4037 } 4038 perf_mmap__read_done(&md->core); 4039 } 4040 4041 if (trace->nr_events == before) { 4042 int timeout = done ? 
100 : -1; 4043 4044 if (!draining && evlist__poll(evlist, timeout) > 0) { 4045 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 4046 draining = true; 4047 4048 goto again; 4049 } else { 4050 if (trace__flush_events(trace)) 4051 goto out_disable; 4052 } 4053 } else { 4054 goto again; 4055 } 4056 4057 out_disable: 4058 thread__zput(trace->current); 4059 4060 evlist__disable(evlist); 4061 4062 if (trace->sort_events) 4063 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL); 4064 4065 if (!err) { 4066 if (trace->summary) 4067 trace__fprintf_thread_summary(trace, trace->output); 4068 4069 if (trace->show_tool_stats) { 4070 fprintf(trace->output, "Stats:\n " 4071 " vfs_getname : %" PRIu64 "\n" 4072 " proc_getname: %" PRIu64 "\n", 4073 trace->stats.vfs_getname, 4074 trace->stats.proc_getname); 4075 } 4076 } 4077 4078 out_delete_evlist: 4079 trace__symbols__exit(trace); 4080 4081 evlist__delete(evlist); 4082 cgroup__put(trace->cgroup); 4083 trace->evlist = NULL; 4084 trace->live = false; 4085 return err; 4086 { 4087 char errbuf[BUFSIZ]; 4088 4089 out_error_sched_stat_runtime: 4090 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime"); 4091 goto out_error; 4092 4093 out_error_raw_syscalls: 4094 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)"); 4095 goto out_error; 4096 4097 out_error_mmap: 4098 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf)); 4099 goto out_error; 4100 4101 out_error_open: 4102 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 4103 4104 out_error: 4105 fprintf(trace->output, "%s\n", errbuf); 4106 goto out_delete_evlist; 4107 4108 out_error_apply_filters: 4109 fprintf(trace->output, 4110 "Failed to set filter \"%s\" on event %s with %d (%s)\n", 4111 evsel->filter, perf_evsel__name(evsel), errno, 4112 str_error_r(errno, errbuf, sizeof(errbuf))); 4113 goto out_delete_evlist; 4114 } 4115 out_error_mem: 4116 fprintf(trace->output, "Not enough memory to run!\n"); 4117 goto out_delete_evlist; 4118 4119 out_errno: 4120 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno)); 4121 goto out_delete_evlist; 4122 } 4123 4124 static int trace__replay(struct trace *trace) 4125 { 4126 const struct evsel_str_handler handlers[] = { 4127 { "probe:vfs_getname", trace__vfs_getname, }, 4128 }; 4129 struct perf_data data = { 4130 .path = input_name, 4131 .mode = PERF_DATA_MODE_READ, 4132 .force = trace->force, 4133 }; 4134 struct perf_session *session; 4135 struct evsel *evsel; 4136 int err = -1; 4137 4138 trace->tool.sample = trace__process_sample; 4139 trace->tool.mmap = perf_event__process_mmap; 4140 trace->tool.mmap2 = perf_event__process_mmap2; 4141 trace->tool.comm = perf_event__process_comm; 4142 trace->tool.exit = perf_event__process_exit; 4143 trace->tool.fork = perf_event__process_fork; 4144 trace->tool.attr = perf_event__process_attr; 4145 trace->tool.tracing_data = perf_event__process_tracing_data; 4146 trace->tool.build_id = perf_event__process_build_id; 4147 trace->tool.namespaces = perf_event__process_namespaces; 4148 4149 trace->tool.ordered_events = true; 4150 trace->tool.ordering_requires_timestamps = true; 4151 4152 /* add tid to output */ 4153 trace->multiple_threads = true; 4154 4155 session = perf_session__new(&data, false, &trace->tool); 4156 if (IS_ERR(session)) 4157 return PTR_ERR(session); 4158 4159 if (trace->opts.target.pid) 4160 symbol_conf.pid_list_str = strdup(trace->opts.target.pid); 4161 4162 if (trace->opts.target.tid) 
4163 symbol_conf.tid_list_str = strdup(trace->opts.target.tid); 4164 4165 if (symbol__init(&session->header.env) < 0) 4166 goto out; 4167 4168 trace->host = &session->machines.host; 4169 4170 err = perf_session__set_tracepoints_handlers(session, handlers); 4171 if (err) 4172 goto out; 4173 4174 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 4175 "raw_syscalls:sys_enter"); 4176 /* older kernels have syscalls tp versus raw_syscalls */ 4177 if (evsel == NULL) 4178 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 4179 "syscalls:sys_enter"); 4180 4181 if (evsel && 4182 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 || 4183 perf_evsel__init_sc_tp_ptr_field(evsel, args))) { 4184 pr_err("Error during initialize raw_syscalls:sys_enter event\n"); 4185 goto out; 4186 } 4187 4188 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 4189 "raw_syscalls:sys_exit"); 4190 if (evsel == NULL) 4191 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, 4192 "syscalls:sys_exit"); 4193 if (evsel && 4194 (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 || 4195 perf_evsel__init_sc_tp_uint_field(evsel, ret))) { 4196 pr_err("Error during initialize raw_syscalls:sys_exit event\n"); 4197 goto out; 4198 } 4199 4200 evlist__for_each_entry(session->evlist, evsel) { 4201 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && 4202 (evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ || 4203 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 4204 evsel->core.attr.config == PERF_COUNT_SW_PAGE_FAULTS)) 4205 evsel->handler = trace__pgfault; 4206 } 4207 4208 setup_pager(); 4209 4210 err = perf_session__process_events(session); 4211 if (err) 4212 pr_err("Failed to process events, error %d", err); 4213 4214 else if (trace->summary) 4215 trace__fprintf_thread_summary(trace, trace->output); 4216 4217 out: 4218 perf_session__delete(session); 4219 4220 return err; 4221 } 4222 4223 static size_t trace__fprintf_threads_header(FILE *fp) 4224 { 4225 size_t printed; 4226 4227 printed = fprintf(fp, "\n Summary of events:\n\n"); 4228 4229 return printed; 4230 } 4231 4232 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs, 4233 struct syscall_stats *stats; 4234 double msecs; 4235 int syscall; 4236 ) 4237 { 4238 struct int_node *source = rb_entry(nd, struct int_node, rb_node); 4239 struct syscall_stats *stats = source->priv; 4240 4241 entry->syscall = source->i; 4242 entry->stats = stats; 4243 entry->msecs = stats ? 
(u64)stats->stats.n * (avg_stats(&stats->stats) / NSEC_PER_MSEC) : 0; 4244 } 4245 4246 static size_t thread__dump_stats(struct thread_trace *ttrace, 4247 struct trace *trace, FILE *fp) 4248 { 4249 size_t printed = 0; 4250 struct syscall *sc; 4251 struct rb_node *nd; 4252 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats); 4253 4254 if (syscall_stats == NULL) 4255 return 0; 4256 4257 printed += fprintf(fp, "\n"); 4258 4259 printed += fprintf(fp, " syscall calls errors total min avg max stddev\n"); 4260 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n"); 4261 printed += fprintf(fp, " --------------- -------- ------ -------- --------- --------- --------- ------\n"); 4262 4263 resort_rb__for_each_entry(nd, syscall_stats) { 4264 struct syscall_stats *stats = syscall_stats_entry->stats; 4265 if (stats) { 4266 double min = (double)(stats->stats.min) / NSEC_PER_MSEC; 4267 double max = (double)(stats->stats.max) / NSEC_PER_MSEC; 4268 double avg = avg_stats(&stats->stats); 4269 double pct; 4270 u64 n = (u64)stats->stats.n; 4271 4272 pct = avg ? 100.0 * stddev_stats(&stats->stats) / avg : 0.0; 4273 avg /= NSEC_PER_MSEC; 4274 4275 sc = &trace->syscalls.table[syscall_stats_entry->syscall]; 4276 printed += fprintf(fp, " %-15s", sc->name); 4277 printed += fprintf(fp, " %8" PRIu64 " %6" PRIu64 " %9.3f %9.3f %9.3f", 4278 n, stats->nr_failures, syscall_stats_entry->msecs, min, avg); 4279 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); 4280 4281 if (trace->errno_summary && stats->nr_failures) { 4282 const char *arch_name = perf_env__arch(trace->host->env); 4283 int e; 4284 4285 for (e = 0; e < stats->max_errno; ++e) { 4286 if (stats->errnos[e] != 0) 4287 fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]); 4288 } 4289 } 4290 } 4291 } 4292 4293 resort_rb__delete(syscall_stats); 4294 printed += fprintf(fp, "\n\n"); 4295 4296 return printed; 4297 } 4298 4299 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) 4300 { 4301 size_t printed = 0; 4302 struct thread_trace *ttrace = thread__priv(thread); 4303 double ratio; 4304 4305 if (ttrace == NULL) 4306 return 0; 4307 4308 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0; 4309 4310 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid); 4311 printed += fprintf(fp, "%lu events, ", ttrace->nr_events); 4312 printed += fprintf(fp, "%.1f%%", ratio); 4313 if (ttrace->pfmaj) 4314 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj); 4315 if (ttrace->pfmin) 4316 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin); 4317 if (trace->sched) 4318 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms); 4319 else if (fputc('\n', fp) != EOF) 4320 ++printed; 4321 4322 printed += thread__dump_stats(ttrace, trace, fp); 4323 4324 return printed; 4325 } 4326 4327 static unsigned long thread__nr_events(struct thread_trace *ttrace) 4328 { 4329 return ttrace ? 
ttrace->nr_events : 0; 4330 } 4331 4332 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)), 4333 struct thread *thread; 4334 ) 4335 { 4336 entry->thread = rb_entry(nd, struct thread, rb_node); 4337 } 4338 4339 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) 4340 { 4341 size_t printed = trace__fprintf_threads_header(fp); 4342 struct rb_node *nd; 4343 int i; 4344 4345 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 4346 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i); 4347 4348 if (threads == NULL) { 4349 fprintf(fp, "%s", "Error sorting output by nr_events!\n"); 4350 return 0; 4351 } 4352 4353 resort_rb__for_each_entry(nd, threads) 4354 printed += trace__fprintf_thread(fp, threads_entry->thread, trace); 4355 4356 resort_rb__delete(threads); 4357 } 4358 return printed; 4359 } 4360 4361 static int trace__set_duration(const struct option *opt, const char *str, 4362 int unset __maybe_unused) 4363 { 4364 struct trace *trace = opt->value; 4365 4366 trace->duration_filter = atof(str); 4367 return 0; 4368 } 4369 4370 static int trace__set_filter_pids_from_option(const struct option *opt, const char *str, 4371 int unset __maybe_unused) 4372 { 4373 int ret = -1; 4374 size_t i; 4375 struct trace *trace = opt->value; 4376 /* 4377 * FIXME: introduce a intarray class, plain parse csv and create a 4378 * { int nr, int entries[] } struct... 4379 */ 4380 struct intlist *list = intlist__new(str); 4381 4382 if (list == NULL) 4383 return -1; 4384 4385 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1; 4386 trace->filter_pids.entries = calloc(i, sizeof(pid_t)); 4387 4388 if (trace->filter_pids.entries == NULL) 4389 goto out; 4390 4391 trace->filter_pids.entries[0] = getpid(); 4392 4393 for (i = 1; i < trace->filter_pids.nr; ++i) 4394 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; 4395 4396 intlist__delete(list); 4397 ret = 0; 4398 out: 4399 return ret; 4400 } 4401 4402 static int trace__open_output(struct trace *trace, const char *filename) 4403 { 4404 struct stat st; 4405 4406 if (!stat(filename, &st) && st.st_size) { 4407 char oldname[PATH_MAX]; 4408 4409 scnprintf(oldname, sizeof(oldname), "%s.old", filename); 4410 unlink(oldname); 4411 rename(filename, oldname); 4412 } 4413 4414 trace->output = fopen(filename, "w"); 4415 4416 return trace->output == NULL ? 
-errno : 0; 4417 } 4418 4419 static int parse_pagefaults(const struct option *opt, const char *str, 4420 int unset __maybe_unused) 4421 { 4422 int *trace_pgfaults = opt->value; 4423 4424 if (strcmp(str, "all") == 0) 4425 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN; 4426 else if (strcmp(str, "maj") == 0) 4427 *trace_pgfaults |= TRACE_PFMAJ; 4428 else if (strcmp(str, "min") == 0) 4429 *trace_pgfaults |= TRACE_PFMIN; 4430 else 4431 return -1; 4432 4433 return 0; 4434 } 4435 4436 static void evlist__set_default_evsel_handler(struct evlist *evlist, void *handler) 4437 { 4438 struct evsel *evsel; 4439 4440 evlist__for_each_entry(evlist, evsel) { 4441 if (evsel->handler == NULL) 4442 evsel->handler = handler; 4443 } 4444 } 4445 4446 static void evsel__set_syscall_arg_fmt(struct evsel *evsel, const char *name) 4447 { 4448 struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel); 4449 4450 if (fmt) { 4451 struct syscall_fmt *scfmt = syscall_fmt__find(name); 4452 4453 if (scfmt) { 4454 int skip = 0; 4455 4456 if (strcmp(evsel->tp_format->format.fields->name, "__syscall_nr") == 0 || 4457 strcmp(evsel->tp_format->format.fields->name, "nr") == 0) 4458 ++skip; 4459 4460 memcpy(fmt + skip, scfmt->arg, (evsel->tp_format->format.nr_fields - skip) * sizeof(*fmt)); 4461 } 4462 } 4463 } 4464 4465 static int evlist__set_syscall_tp_fields(struct evlist *evlist) 4466 { 4467 struct evsel *evsel; 4468 4469 evlist__for_each_entry(evlist, evsel) { 4470 if (evsel->priv || !evsel->tp_format) 4471 continue; 4472 4473 if (strcmp(evsel->tp_format->system, "syscalls")) { 4474 perf_evsel__init_tp_arg_scnprintf(evsel); 4475 continue; 4476 } 4477 4478 if (perf_evsel__init_syscall_tp(evsel)) 4479 return -1; 4480 4481 if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) { 4482 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4483 4484 if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64))) 4485 return -1; 4486 4487 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_enter_") - 1); 4488 } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) { 4489 struct syscall_tp *sc = __evsel__syscall_tp(evsel); 4490 4491 if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap)) 4492 return -1; 4493 4494 evsel__set_syscall_arg_fmt(evsel, evsel->tp_format->name + sizeof("sys_exit_") - 1); 4495 } 4496 } 4497 4498 return 0; 4499 } 4500 4501 /* 4502 * XXX: Hackish, just splitting the combined -e+--event (syscalls 4503 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use 4504 * existing facilities unchanged (trace->ev_qualifier + parse_options()). 4505 * 4506 * It'd be better to introduce a parse_options() variant that would return a 4507 * list with the terms it didn't match to an event... 
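 *
 * E.g.:
 *
 * perf trace -e open*,sched:sched_switch
 *
 * ends up with the syscall selector ("open*") in trace->ev_qualifier,
 * while "sched:sched_switch", not being a syscall, is handed to
 * parse_events_option() just as if it had been passed via --event.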
4508 */ 4509 static int trace__parse_events_option(const struct option *opt, const char *str, 4510 int unset __maybe_unused) 4511 { 4512 struct trace *trace = (struct trace *)opt->value; 4513 const char *s = str; 4514 char *sep = NULL, *lists[2] = { NULL, NULL, }; 4515 int len = strlen(str) + 1, err = -1, list, idx; 4516 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR); 4517 char group_name[PATH_MAX]; 4518 struct syscall_fmt *fmt; 4519 4520 if (strace_groups_dir == NULL) 4521 return -1; 4522 4523 if (*s == '!') { 4524 ++s; 4525 trace->not_ev_qualifier = true; 4526 } 4527 4528 while (1) { 4529 if ((sep = strchr(s, ',')) != NULL) 4530 *sep = '\0'; 4531 4532 list = 0; 4533 if (syscalltbl__id(trace->sctbl, s) >= 0 || 4534 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) { 4535 list = 1; 4536 goto do_concat; 4537 } 4538 4539 fmt = syscall_fmt__find_by_alias(s); 4540 if (fmt != NULL) { 4541 list = 1; 4542 s = fmt->name; 4543 } else { 4544 path__join(group_name, sizeof(group_name), strace_groups_dir, s); 4545 if (access(group_name, R_OK) == 0) 4546 list = 1; 4547 } 4548 do_concat: 4549 if (lists[list]) { 4550 sprintf(lists[list] + strlen(lists[list]), ",%s", s); 4551 } else { 4552 lists[list] = malloc(len); 4553 if (lists[list] == NULL) 4554 goto out; 4555 strcpy(lists[list], s); 4556 } 4557 4558 if (!sep) 4559 break; 4560 4561 *sep = ','; 4562 s = sep + 1; 4563 } 4564 4565 if (lists[1] != NULL) { 4566 struct strlist_config slist_config = { 4567 .dirname = strace_groups_dir, 4568 }; 4569 4570 trace->ev_qualifier = strlist__new(lists[1], &slist_config); 4571 if (trace->ev_qualifier == NULL) { 4572 fputs("Not enough memory to parse event qualifier", trace->output); 4573 goto out; 4574 } 4575 4576 if (trace__validate_ev_qualifier(trace)) 4577 goto out; 4578 trace->trace_syscalls = true; 4579 } 4580 4581 err = 0; 4582 4583 if (lists[0]) { 4584 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event", 4585 "event selector. 
use 'perf list' to list available events", 4586 parse_events_option); 4587 err = parse_events_option(&o, lists[0], 0); 4588 } 4589 out: 4590 if (sep) 4591 *sep = ','; 4592 4593 return err; 4594 } 4595 4596 static int trace__parse_cgroups(const struct option *opt, const char *str, int unset) 4597 { 4598 struct trace *trace = opt->value; 4599 4600 if (!list_empty(&trace->evlist->core.entries)) 4601 return parse_cgroups(opt, str, unset); 4602 4603 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); 4604 4605 return 0; 4606 } 4607 4608 static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name) 4609 { 4610 if (trace->bpf_obj == NULL) 4611 return NULL; 4612 4613 return bpf_object__find_map_by_name(trace->bpf_obj, name); 4614 } 4615 4616 static void trace__set_bpf_map_filtered_pids(struct trace *trace) 4617 { 4618 trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered"); 4619 } 4620 4621 static void trace__set_bpf_map_syscalls(struct trace *trace) 4622 { 4623 trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls"); 4624 trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter"); 4625 trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit"); 4626 } 4627 4628 static int trace__config(const char *var, const char *value, void *arg) 4629 { 4630 struct trace *trace = arg; 4631 int err = 0; 4632 4633 if (!strcmp(var, "trace.add_events")) { 4634 trace->perfconfig_events = strdup(value); 4635 if (trace->perfconfig_events == NULL) { 4636 pr_err("Not enough memory for %s\n", "trace.add_events"); 4637 return -1; 4638 } 4639 } else if (!strcmp(var, "trace.show_timestamp")) { 4640 trace->show_tstamp = perf_config_bool(var, value); 4641 } else if (!strcmp(var, "trace.show_duration")) { 4642 trace->show_duration = perf_config_bool(var, value); 4643 } else if (!strcmp(var, "trace.show_arg_names")) { 4644 trace->show_arg_names = perf_config_bool(var, value); 4645 if (!trace->show_arg_names) 4646 trace->show_zeros = true; 4647 } else if (!strcmp(var, "trace.show_zeros")) { 4648 bool new_show_zeros = perf_config_bool(var, value); 4649 if (!trace->show_arg_names && !new_show_zeros) { 4650 pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n"); 4651 goto out; 4652 } 4653 trace->show_zeros = new_show_zeros; 4654 } else if (!strcmp(var, "trace.show_prefix")) { 4655 trace->show_string_prefix = perf_config_bool(var, value); 4656 } else if (!strcmp(var, "trace.no_inherit")) { 4657 trace->opts.no_inherit = perf_config_bool(var, value); 4658 } else if (!strcmp(var, "trace.args_alignment")) { 4659 int args_alignment = 0; 4660 if (perf_config_int(&args_alignment, var, value) == 0) 4661 trace->args_alignment = args_alignment; 4662 } else if (!strcmp(var, "trace.tracepoint_beautifiers")) { 4663 if (strcasecmp(value, "libtraceevent") == 0) 4664 trace->libtraceevent_print = true; 4665 else if (strcasecmp(value, "libbeauty") == 0) 4666 trace->libtraceevent_print = false; 4667 } 4668 out: 4669 return err; 4670 } 4671 4672 int cmd_trace(int argc, const char **argv) 4673 { 4674 const char *trace_usage[] = { 4675 "perf trace [<options>] [<command>]", 4676 "perf trace [<options>] -- <command> [<options>]", 4677 "perf trace record [<options>] [<command>]", 4678 "perf trace record [<options>] -- <command> [<options>]", 4679 NULL 4680 }; 4681 struct trace trace = { 4682 .opts = { 4683 .target = { 4684 .uid = UINT_MAX, 4685 .uses_mmap = true, 4686 }, 4687 .user_freq = 
UINT_MAX, 4688 .user_interval = ULLONG_MAX, 4689 .no_buffering = true, 4690 .mmap_pages = UINT_MAX, 4691 }, 4692 .output = stderr, 4693 .show_comm = true, 4694 .show_tstamp = true, 4695 .show_duration = true, 4696 .show_arg_names = true, 4697 .args_alignment = 70, 4698 .trace_syscalls = false, 4699 .kernel_syscallchains = false, 4700 .max_stack = UINT_MAX, 4701 .max_events = ULONG_MAX, 4702 }; 4703 const char *map_dump_str = NULL; 4704 const char *output_name = NULL; 4705 const struct option trace_options[] = { 4706 OPT_CALLBACK('e', "event", &trace, "event", 4707 "event/syscall selector. use 'perf list' to list available events", 4708 trace__parse_events_option), 4709 OPT_CALLBACK(0, "filter", &trace.evlist, "filter", 4710 "event filter", parse_filter), 4711 OPT_BOOLEAN(0, "comm", &trace.show_comm, 4712 "show the thread COMM next to its id"), 4713 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"), 4714 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace", 4715 trace__parse_events_option), 4716 OPT_STRING('o', "output", &output_name, "file", "output file name"), 4717 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"), 4718 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", 4719 "trace events on existing process id"), 4720 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid", 4721 "trace events on existing thread id"), 4722 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids", 4723 "pids to filter (by the kernel)", trace__set_filter_pids_from_option), 4724 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide, 4725 "system-wide collection from all CPUs"), 4726 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu", 4727 "list of cpus to monitor"), 4728 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, 4729 "child tasks do not inherit counters"), 4730 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages", 4731 "number of mmap data pages", 4732 perf_evlist__parse_mmap_pages), 4733 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user", 4734 "user to profile"), 4735 OPT_CALLBACK(0, "duration", &trace, "float", 4736 "show only events with duration > N.M ms", 4737 trace__set_duration), 4738 #ifdef HAVE_LIBBPF_SUPPORT 4739 OPT_STRING(0, "map-dump", &map_dump_str, "BPF map", "BPF map to periodically dump"), 4740 #endif 4741 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), 4742 OPT_INCR('v', "verbose", &verbose, "be more verbose"), 4743 OPT_BOOLEAN('T', "time", &trace.full_time, 4744 "Show full timestamp, not time relative to first start"), 4745 OPT_BOOLEAN(0, "failure", &trace.failure_only, 4746 "Show only syscalls that failed"), 4747 OPT_BOOLEAN('s', "summary", &trace.summary_only, 4748 "Show only syscall summary with statistics"), 4749 OPT_BOOLEAN('S', "with-summary", &trace.summary, 4750 "Show all syscalls and summary with statistics"), 4751 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary, 4752 "Show errno stats per syscall, use with -s or -S"), 4753 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min", 4754 "Trace pagefaults", parse_pagefaults, "maj"), 4755 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"), 4756 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"), 4757 OPT_CALLBACK(0, "call-graph", &trace.opts, 4758 "record_mode[,record_size]", record_callchain_help, 4759 &record_parse_callchain_opt), 4760 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print, 4761 "Use libtraceevent to print the tracepoint 
arguments."), 4762 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains, 4763 "Show the kernel callchains on the syscall exit path"), 4764 OPT_ULONG(0, "max-events", &trace.max_events, 4765 "Set the maximum number of events to print, exit after that is reached. "), 4766 OPT_UINTEGER(0, "min-stack", &trace.min_stack, 4767 "Set the minimum stack depth when parsing the callchain, " 4768 "anything below the specified depth will be ignored."), 4769 OPT_UINTEGER(0, "max-stack", &trace.max_stack, 4770 "Set the maximum stack depth when parsing the callchain, " 4771 "anything beyond the specified depth will be ignored. " 4772 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 4773 OPT_BOOLEAN(0, "sort-events", &trace.sort_events, 4774 "Sort batch of events before processing, use if getting out of order events"), 4775 OPT_BOOLEAN(0, "print-sample", &trace.print_sample, 4776 "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"), 4777 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout, 4778 "per thread proc mmap processing timeout in ms"), 4779 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only", 4780 trace__parse_cgroups), 4781 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay, 4782 "ms to wait before starting measurement after program " 4783 "start"), 4784 OPTS_EVSWITCH(&trace.evswitch), 4785 OPT_END() 4786 }; 4787 bool __maybe_unused max_stack_user_set = true; 4788 bool mmap_pages_user_set = true; 4789 struct evsel *evsel; 4790 const char * const trace_subcommands[] = { "record", NULL }; 4791 int err = -1; 4792 char bf[BUFSIZ]; 4793 4794 signal(SIGSEGV, sighandler_dump_stack); 4795 signal(SIGFPE, sighandler_dump_stack); 4796 4797 trace.evlist = evlist__new(); 4798 trace.sctbl = syscalltbl__new(); 4799 4800 if (trace.evlist == NULL || trace.sctbl == NULL) { 4801 pr_err("Not enough memory to run!\n"); 4802 err = -ENOMEM; 4803 goto out; 4804 } 4805 4806 /* 4807 * Parsing .perfconfig may entail creating a BPF event, that may need 4808 * to create BPF maps, so bump RLIM_MEMLOCK as the default 64K setting 4809 * is too small. This affects just this process, not touching the 4810 * global setting. If it fails we'll get something in 'perf trace -v' 4811 * to help diagnose the problem. 4812 */ 4813 rlimit__bump_memlock(); 4814 4815 err = perf_config(trace__config, &trace); 4816 if (err) 4817 goto out; 4818 4819 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands, 4820 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION); 4821 4822 /* 4823 * Here we already passed thru trace__parse_events_option() and it has 4824 * already figured out if -e syscall_name, if not but if --event 4825 * foo:bar was used, the user is interested _just_ in those, say, 4826 * tracepoint events, not in the strace-like syscall-name-based mode. 4827 * 4828 * This is important because we need to check if strace-like mode is 4829 * needed to decided if we should filter out the eBPF 4830 * __augmented_syscalls__ code, if it is in the mix, say, via 4831 * .perfconfig trace.add_events, and filter those out. 4832 */ 4833 if (!trace.trace_syscalls && !trace.trace_pgfaults && 4834 trace.evlist->core.nr_entries == 0 /* Was --events used? 
*/) { 4835 trace.trace_syscalls = true; 4836 } 4837 /* 4838 * Now that we have --verbose figured out, lets see if we need to parse 4839 * events from .perfconfig, so that if those events fail parsing, say some 4840 * BPF program fails, then we'll be able to use --verbose to see what went 4841 * wrong in more detail. 4842 */ 4843 if (trace.perfconfig_events != NULL) { 4844 struct parse_events_error parse_err; 4845 4846 bzero(&parse_err, sizeof(parse_err)); 4847 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err); 4848 if (err) { 4849 parse_events_print_error(&parse_err, trace.perfconfig_events); 4850 goto out; 4851 } 4852 } 4853 4854 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) { 4855 usage_with_options_msg(trace_usage, trace_options, 4856 "cgroup monitoring only available in system-wide mode"); 4857 } 4858 4859 evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__"); 4860 if (IS_ERR(evsel)) { 4861 bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf)); 4862 pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf); 4863 goto out; 4864 } 4865 4866 if (evsel) { 4867 trace.syscalls.events.augmented = evsel; 4868 4869 evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter"); 4870 if (evsel == NULL) { 4871 pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n"); 4872 goto out; 4873 } 4874 4875 if (evsel->bpf_obj == NULL) { 4876 pr_err("ERROR: raw_syscalls:sys_enter not associated to a BPF object\n"); 4877 goto out; 4878 } 4879 4880 trace.bpf_obj = evsel->bpf_obj; 4881 4882 /* 4883 * If we have _just_ the augmenter event but don't have a 4884 * explicit --syscalls, then assume we want all strace-like 4885 * syscalls: 4886 */ 4887 if (!trace.trace_syscalls && trace__only_augmented_syscalls_evsels(&trace)) 4888 trace.trace_syscalls = true; 4889 /* 4890 * So, if we have a syscall augmenter, but trace_syscalls, aka 4891 * strace-like syscall tracing is not set, then we need to trow 4892 * away the augmenter, i.e. all the events that were created 4893 * from that BPF object file. 4894 * 4895 * This is more to fix the current .perfconfig trace.add_events 4896 * style of setting up the strace-like eBPF based syscall point 4897 * payload augmenter. 4898 * 4899 * All this complexity will be avoided by adding an alternative 4900 * to trace.add_events in the form of 4901 * trace.bpf_augmented_syscalls, that will be only parsed if we 4902 * need it. 4903 * 4904 * .perfconfig trace.add_events is still useful if we want, for 4905 * instance, have msr_write.msr in some .perfconfig profile based 4906 * 'perf trace --config determinism.profile' mode, where for some 4907 * particular goal/workload type we want a set of events and 4908 * output mode (with timings, etc) instead of having to add 4909 * all via the command line. 4910 * 4911 * Also --config to specify an alternate .perfconfig file needs 4912 * to be implemented. 
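 *
 * Such a profile section in .perfconfig could look something like this
 * (illustrative only, any valid event list works):
 *
 *	[trace]
 *		add_events = msr:write_msr
 *		show_duration = no
 *		args_alignment = 40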
4913 */ 4914 if (!trace.trace_syscalls) { 4915 trace__delete_augmented_syscalls(&trace); 4916 } else { 4917 trace__set_bpf_map_filtered_pids(&trace); 4918 trace__set_bpf_map_syscalls(&trace); 4919 trace.syscalls.unaugmented_prog = trace__find_bpf_program_by_title(&trace, "!raw_syscalls:unaugmented"); 4920 } 4921 } 4922 4923 err = bpf__setup_stdout(trace.evlist); 4924 if (err) { 4925 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf)); 4926 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf); 4927 goto out; 4928 } 4929 4930 err = -1; 4931 4932 if (map_dump_str) { 4933 trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str); 4934 if (trace.dump.map == NULL) { 4935 pr_err("ERROR: BPF map \"%s\" not found\n", map_dump_str); 4936 goto out; 4937 } 4938 } 4939 4940 if (trace.trace_pgfaults) { 4941 trace.opts.sample_address = true; 4942 trace.opts.sample_time = true; 4943 } 4944 4945 if (trace.opts.mmap_pages == UINT_MAX) 4946 mmap_pages_user_set = false; 4947 4948 if (trace.max_stack == UINT_MAX) { 4949 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack(); 4950 max_stack_user_set = false; 4951 } 4952 4953 #ifdef HAVE_DWARF_UNWIND_SUPPORT 4954 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) { 4955 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 4956 } 4957 #endif 4958 4959 if (callchain_param.enabled) { 4960 if (!mmap_pages_user_set && geteuid() == 0) 4961 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4; 4962 4963 symbol_conf.use_callchain = true; 4964 } 4965 4966 if (trace.evlist->core.nr_entries > 0) { 4967 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler); 4968 if (evlist__set_syscall_tp_fields(trace.evlist)) { 4969 perror("failed to set syscalls:* tracepoint fields"); 4970 goto out; 4971 } 4972 } 4973 4974 if (trace.sort_events) { 4975 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace); 4976 ordered_events__set_copy_on_queue(&trace.oe.data, true); 4977 } 4978 4979 /* 4980 * If we are augmenting syscalls, then combine what we put in the 4981 * __augmented_syscalls__ BPF map with what is in the 4982 * syscalls:sys_exit_FOO tracepoints, i.e. just like we do without BPF, 4983 * combining raw_syscalls:sys_enter with raw_syscalls:sys_exit. 4984 * 4985 * We'll switch to look at two BPF maps, one for sys_enter and the 4986 * other for sys_exit when we start augmenting the sys_exit paths with 4987 * buffers that are being copied from kernel to userspace, think 'read' 4988 * syscall. 4989 */ 4990 if (trace.syscalls.events.augmented) { 4991 evlist__for_each_entry(trace.evlist, evsel) { 4992 bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0; 4993 4994 if (raw_syscalls_sys_exit) { 4995 trace.raw_augmented_syscalls = true; 4996 goto init_augmented_syscall_tp; 4997 } 4998 4999 if (trace.syscalls.events.augmented->priv == NULL && 5000 strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) { 5001 struct evsel *augmented = trace.syscalls.events.augmented; 5002 if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) || 5003 perf_evsel__init_augmented_syscall_tp_args(augmented)) 5004 goto out; 5005 /* 5006 * Augmented is __augmented_syscalls__ BPF_OUTPUT event 5007 * Above we made sure we can get from the payload the tp fields 5008 * that we get from syscalls:sys_enter tracefs format file. 
5009 */ 5010 augmented->handler = trace__sys_enter; 5011 /* 5012 * Now we do the same for the *syscalls:sys_enter event so that 5013 * if we handle it directly, i.e. if the BPF prog returns 0 so 5014 * as not to filter it, then we'll handle it just like we would 5015 * for the BPF_OUTPUT one: 5016 */ 5017 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) || 5018 perf_evsel__init_augmented_syscall_tp_args(evsel)) 5019 goto out; 5020 evsel->handler = trace__sys_enter; 5021 } 5022 5023 if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) { 5024 struct syscall_tp *sc; 5025 init_augmented_syscall_tp: 5026 if (perf_evsel__init_augmented_syscall_tp(evsel, evsel)) 5027 goto out; 5028 sc = __evsel__syscall_tp(evsel); 5029 /* 5030 * For now with BPF raw_augmented we hook into 5031 * raw_syscalls:sys_enter and there we get all 5032 * 6 syscall args plus the tracepoint common 5033 * fields and the syscall_nr (another long). 5034 * So we check if that is the case and if so 5035 * don't look after the sc->args_size but 5036 * always after the full raw_syscalls:sys_enter 5037 * payload, which is fixed. 5038 * 5039 * We'll revisit this later to pass 5040 * s->args_size to the BPF augmenter (now 5041 * tools/perf/examples/bpf/augmented_raw_syscalls.c, 5042 * so that it copies only what we need for each 5043 * syscall, like what happens when we use 5044 * syscalls:sys_enter_NAME, so that we reduce 5045 * the kernel/userspace traffic to just what is 5046 * needed for each syscall. 5047 */ 5048 if (trace.raw_augmented_syscalls) 5049 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset; 5050 perf_evsel__init_augmented_syscall_tp_ret(evsel); 5051 evsel->handler = trace__sys_exit; 5052 } 5053 } 5054 } 5055 5056 if ((argc >= 1) && (strcmp(argv[0], "record") == 0)) 5057 return trace__record(&trace, argc-1, &argv[1]); 5058 5059 /* Using just --errno-summary will trigger --summary */ 5060 if (trace.errno_summary && !trace.summary && !trace.summary_only) 5061 trace.summary_only = true; 5062 5063 /* summary_only implies summary option, but don't overwrite summary if set */ 5064 if (trace.summary_only) 5065 trace.summary = trace.summary_only; 5066 5067 if (output_name != NULL) { 5068 err = trace__open_output(&trace, output_name); 5069 if (err < 0) { 5070 perror("failed to create output file"); 5071 goto out; 5072 } 5073 } 5074 5075 err = evswitch__init(&trace.evswitch, trace.evlist, stderr); 5076 if (err) 5077 goto out_close; 5078 5079 err = target__validate(&trace.opts.target); 5080 if (err) { 5081 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5082 fprintf(trace.output, "%s", bf); 5083 goto out_close; 5084 } 5085 5086 err = target__parse_uid(&trace.opts.target); 5087 if (err) { 5088 target__strerror(&trace.opts.target, err, bf, sizeof(bf)); 5089 fprintf(trace.output, "%s", bf); 5090 goto out_close; 5091 } 5092 5093 if (!argc && target__none(&trace.opts.target)) 5094 trace.opts.target.system_wide = true; 5095 5096 if (input_name) 5097 err = trace__replay(&trace); 5098 else 5099 err = trace__run(&trace, argc, argv); 5100 5101 out_close: 5102 if (output_name != NULL) 5103 fclose(trace.output); 5104 out: 5105 zfree(&trace.perfconfig_events); 5106 return err; 5107 } 5108
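
/*
 * A minimal, build-excluded sketch of how one entry in the
 * "syscalls_sys_enter" BPF_MAP_TYPE_PROG_ARRAY gets wired up, the kind of
 * thing trace__init_syscalls_bpf_prog_array_maps() does above. The object
 * path and program title used below are hypothetical; this only
 * illustrates the tail-call plumbing and is not part of perf itself.
 */
#if 0
#include <bpf/libbpf.h>
#include <bpf/bpf.h>

static int example__install_sys_enter_prog(const char *obj_path, int syscall_nr)
{
	struct bpf_object *obj = bpf_object__open(obj_path);
	struct bpf_program *prog;
	struct bpf_map *map;
	int key = syscall_nr, prog_fd;

	if (libbpf_get_error(obj))
		return -1;

	if (bpf_object__load(obj))
		goto out_close;

	map  = bpf_object__find_map_by_name(obj, "syscalls_sys_enter");
	prog = bpf_object__find_program_by_title(obj, "!syscalls:sys_enter_openat");
	if (map == NULL || prog == NULL)
		goto out_close;

	prog_fd = bpf_program__fd(prog);
	/*
	 * raw_syscalls:sys_enter will tail call this program when the
	 * syscall id matches 'key':
	 */
	return bpf_map_update_elem(bpf_map__fd(map), &key, &prog_fd, BPF_ANY);
out_close:
	bpf_object__close(obj);
	return -1;
}
#endif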