// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character given with the
 *
 *   -t, --field-separator
 *
 * option.  That option selects a special separator character and disables
 * padding with spaces, so every occurrence of the separator in symbol names
 * (and other output) is replaced with a '.' character, which therefore is
 * the only character that cannot be used as a separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ?
					map->dso->long_name :
					map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ?
				dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->map, ams->al_addr, ams->sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};
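
/*
 * IPC and instruction coverage for a symbol, taken from its annotation
 * (hit_insn/hit_cycles and cover_insn/total_insn).  This backs the
 * "ipc_lbr" sort key; the _null variant below backs "ipc_null" and only
 * prints placeholders.
 */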
static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}
static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	unsigned long secs;
	unsigned long long nsecs;
	char he_time[32];

	nsecs = he->time;
	secs = nsecs / NSEC_PER_SEC;
	nsecs -= secs * NSEC_PER_SEC;

	if (symbol_conf.nanosecs)
		snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
			 secs, nsecs);
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
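
/*
 * Group samples by the data cache line they accessed: compare cpumode and
 * the backing map identity (maj/min/ino/generation) first, fall back to
 * pid for anonymous userspace mappings, and finally compare the cache-line
 * address itself.
 */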
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}
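
/*
 * Transaction flag names used to decode he->transaction; entries marked
 * skip_for_len are left out of the column-width estimate made by
 * hist_entry__transaction_len().
 */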
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
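
/*
 * hpp_sort_entry wraps a classic struct sort_entry in a perf_hpp_fmt so
 * that the sort keys above can also be used as hists output columns.
 */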
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}
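
/*
 * Apply a filter of the given type through all sort keys in the entry's
 * hpp list: returns -1 when no key handles this filter type, 0 when the
 * entry matches every applicable filter, and non-zero when it should be
 * filtered out.
 */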
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list applies, but non-matching filter types are
		 * skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length needed to print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse the pretty-printed result and update the max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}
static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
/*
 * Find a matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
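/*
 * A dynamic sort key as given with --sort looks like
 * "[<event>.]<field>[/raw]", for example:
 *
 *   -s sched:sched_switch.next_pid   one field of one event
 *   -s '%1.prev_prio/raw'            first event, unformatted field value
 *   -s sched:sched_switch.*          every field of that event
 *   -s next_pid                      the field in any tracepoint having it
 *   -s trace_fields                  every field of every tracepoint
 */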
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
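/*
 * The sort order string is split on '{', '}', ',' and ' '.  Keys inside a
 * '{}' group share one hierarchy level, e.g. with --hierarchy the string
 * "comm,{dso,sym}" puts "dso" and "sym" together on the level below "comm".
 */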
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
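/*
 * The default sort keys are looked up by sort mode, so the order of
 * default_sort_orders[] below must match enum sort_mode.  When every event
 * in the session is a tracepoint, the mode is switched to
 * SORT_MODE__TRACEPOINT so the "trace" key (or "trace_fields" for raw
 * mode) becomes the default.
 */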
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Add a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
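/*
 * Build the sort key list for --sort: resolve a '+'-prefixed order against
 * the defaults (e.g. "-s +cycles" means the default keys plus "cycles"),
 * prepend the overhead key(s) unless a strict --fields order was given,
 * and finally turn the key string into hpp format entries.
 */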
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
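/*
 * Column elision: when a filter list has exactly one entry (e.g. a single
 * name given via --dsos), showing that column would repeat the same value
 * on every line, so the column is hidden and the value is printed once in
 * the header comment instead.
 */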
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
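/*
 * setup_sorting() ties everything together: parse the --sort keys, add the
 * implicit "parent" key when a non-default --parent pattern is in use, then
 * parse --fields and make the sort and output lists consistent by copying
 * sort keys into the output field list and vice versa.
 */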
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}