// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character passed via:
 *
 *   -t, --field-separator
 *
 * That option selects a special separator character and disables padding
 * with spaces, so every occurrence of the separator in symbol names (and
 * other output) is replaced with a '.' character, making the separator the
 * only character that cannot appear inside a field.  E.g. with '-t ;' a
 * symbol named "a;b" is printed as "a.b".
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};
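
/*
 * Note on the comparators in this file: the se_cmp callbacks follow the
 * strcmp() convention (negative/zero/positive) but compare @right against
 * @left (e.g. right->tid - left->tid above); keep that ordering when adding
 * new sort keys.
 */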

/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}
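
/*
 * Shared symbol formatter for the symbol based sort keys below: in verbose
 * mode it prepends the address and the symtab origin character ('!' when
 * there is no map), then prints "[level] " followed by the symbol name.
 * STT_OBJECT (data) symbols get a "+0x<offset>" suffix instead of being
 * truncated to the column width, and inlined symbols are marked with
 * " (inlined)".
 */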

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};
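
/*
 * Like srcline above, the srcline_from/srcline_to branch keys resolve the
 * source line lazily in their cmp callbacks and cache the result in
 * branch_info, so the snprintf callbacks can print the cached string
 * directly.
 */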

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};
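
/*
 * sort_sym_ipc prints IPC as hit_insn/hit_cycles from the symbol's
 * annotation together with the percentage of covered instructions;
 * sort_sym_ipc_null (registered as "ipc_null" below) is the placeholder
 * variant that prints dashes when no such data is collected.
 */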

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}

static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};
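
/*
 * trace_output is generated lazily the first time a tracepoint entry is
 * compared or printed and is then kept on the hist entry;
 * get_trace_output() returns a heap buffer trimmed to the formatted length.
 */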

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       he->branch_info->from.ms.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       he->branch_info->to.ms.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}
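
/*
 * Note that the symbol filters above treat entries without branch_info (or
 * without a resolved symbol) as non-matching, so filtering by symbol hides
 * any entry that has no branch data.
 */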

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};
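
/*
 * Memory access sort keys: these rely on he->mem_info, which is only set
 * for memory samples, so the comparators fall back to 0 or the respective
 * PERF_MEM_*_NA encoding when it is missing.
 */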

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		ms = &he->mem_info->daddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		ms = &he->mem_info->iaddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.ms.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
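
/*
 * The locked, tlb, mem and snoop columns decode the perf_mem_data_src
 * bit-fields through the perf_mem__*_scnprintf() helpers from mem-events.h.
 */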

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	int rc;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	rc = dso__cmp_id(l_map->dso, r_map->dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->dso->id.maj && !l_map->dso->id.min &&
	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;

		addr = cl_address(he->mem_info->daddr.al_addr);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->dso->id.maj || map->dso->id.min ||
		     map->dso->id.ino || map->dso->id.ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
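
/*
 * cl_address() (see cacheline.h) masks the data address down to its cache
 * line, so hist entries touching the same line are grouped together; the
 * 's' and 'X' level characters set above mark shared, file-backed mappings
 * and missing maps respectively.
 */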

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};
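
/*
 * local_weight averages he->stat.weight over the number of events
 * aggregated into the entry, while the global "weight" key sorts on the
 * accumulated total.
 */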

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}
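
/*
 * hist_entry__transaction_len() sizes the Transaction column for the worst
 * case: the names of all flags not marked skip_for_len plus ":XX " for an
 * abort code.
 */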

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM
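
/*
 * The three sort_dimension tables above map --sort key names onto their
 * sort_entry implementations: common keys, branch stack keys
 * (__SORT_BRANCH_STACK) and memory mode keys (__SORT_MEMORY_MODE).  The
 * hpp_dimension table below does the same for output columns such as
 * overhead and period.
 */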

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
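
/*
 * A perf_hpp_fmt is recognised as a sort entry purely by its header
 * callback: every hpp_sort_entry is set up with __sort__hpp_header, so the
 * pointer comparison above is sufficient.
 */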

#define MK_SORT_ENTRY_CHK(key)						\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)		\
{									\
	struct hpp_sort_entry *hse;					\
									\
	if (!perf_hpp__is_sort_entry(fmt))				\
		return false;						\
									\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);		\
	return hse->se == &sort_ ## key;				\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}
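
/*
 * __sort_dimension__alloc_hpp() wires a sort_entry into the generic
 * perf_hpp_fmt machinery; se_collapse and se_sort fall back to se_cmp when
 * a sort key does not provide them (see __sort__hpp_collapse() and
 * __sort__hpp_sort() above).
 */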

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered if any sort key in the hpp
		 * list applies a filter, but non-matching filter types
		 * must be skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}
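
/*
 * Dynamic entries expose individual tracepoint fields as sort keys and
 * output columns.  Unless raw_trace is requested, the printed value is
 * scraped from the pretty-printed trace output (typically space separated
 * "name=value" pairs), and hde_width()/update_dynamic_len() track the
 * widest value seen for column sizing.
 */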
__sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                  struct hist_entry *he)
{
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;
        char *str, *pos;
        struct tep_format_field *field;
        size_t namelen;
        bool last = false;
        int ret;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (!len)
                len = hde_width(hde);

        if (hde->raw_trace)
                goto raw_field;

        if (!he->trace_output)
                he->trace_output = get_trace_output(he);

        field = hde->field;
        namelen = strlen(field->name);
        str = he->trace_output;

        while (str) {
                pos = strchr(str, ' ');
                if (pos == NULL) {
                        last = true;
                        pos = str + strlen(str);
                }

                if (!strncmp(str, field->name, namelen)) {
                        str += namelen + 1;
                        str = strndup(str, pos - str);

                        if (str == NULL)
                                return scnprintf(hpp->buf, hpp->size,
                                                 "%*.*s", len, len, "ERROR");
                        break;
                }

                if (last)
                        str = NULL;
                else
                        str = pos + 1;
        }

        if (str == NULL) {
                struct trace_seq seq;
raw_field:
                trace_seq_init(&seq);
                tep_print_field(&seq, he->raw_data, hde->field);
                str = seq.buffer;
        }

        ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
        free(str);
        return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
                               struct hist_entry *a, struct hist_entry *b)
{
        struct hpp_dynamic_entry *hde;
        struct tep_format_field *field;
        unsigned offset, size;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

        if (b == NULL) {
                update_dynamic_len(hde, a);
                return 0;
        }

        field = hde->field;
        if (field->flags & TEP_FIELD_IS_DYNAMIC) {
                unsigned long long dyn;

                tep_read_number_field(field, a->raw_data, &dyn);
                offset = dyn & 0xffff;
                size = (dyn >> 16) & 0xffff;

                /* record max width for output */
                if (size > hde->dynamic_len)
                        hde->dynamic_len = size;
        } else {
                offset = field->offset;
                size = field->size;
        }

        return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
        return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
        struct hpp_dynamic_entry *hde_a;
        struct hpp_dynamic_entry *hde_b;

        if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
                return false;

        hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
        hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

        return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
        struct hpp_dynamic_entry *hde;

        hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
        free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
                      int level)
{
        struct hpp_dynamic_entry *hde;

        hde = malloc(sizeof(*hde));
        if (hde == NULL) {
                pr_debug("Memory allocation failed\n");
                return NULL;
        }

        hde->evsel = evsel;
        hde->field = field;
        hde->dynamic_len = 0;

        hde->hpp.name = field->name;
        hde->hpp.header = __sort__hde_header;
        hde->hpp.width = __sort__hde_width;
        hde->hpp.entry = __sort__hde_entry;
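        /* dynamic entries print plain text only, so no color callback */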
        hde->hpp.color = NULL;

        hde->hpp.cmp = __sort__hde_cmp;
        hde->hpp.collapse = __sort__hde_cmp;
        hde->hpp.sort = __sort__hde_cmp;
        hde->hpp.equal = __sort__hde_equal;
        hde->hpp.free = hde_free;

        INIT_LIST_HEAD(&hde->hpp.list);
        INIT_LIST_HEAD(&hde->hpp.sort_list);
        hde->hpp.elide = false;
        hde->hpp.len = 0;
        hde->hpp.user_len = 0;
        hde->hpp.level = level;

        return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
        struct perf_hpp_fmt *new_fmt = NULL;

        if (perf_hpp__is_sort_entry(fmt)) {
                struct hpp_sort_entry *hse, *new_hse;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                new_hse = memdup(hse, sizeof(*hse));
                if (new_hse)
                        new_fmt = &new_hse->hpp;
        } else if (perf_hpp__is_dynamic_entry(fmt)) {
                struct hpp_dynamic_entry *hde, *new_hde;

                hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
                new_hde = memdup(hde, sizeof(*hde));
                if (new_hde)
                        new_fmt = &new_hde->hpp;
        } else {
                new_fmt = memdup(fmt, sizeof(*fmt));
        }

        INIT_LIST_HEAD(&new_fmt->list);
        INIT_LIST_HEAD(&new_fmt->sort_list);

        return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
        char *event_name, *field_name, *opt_name;

        event_name = str;
        field_name = strchr(str, '.');

        if (field_name) {
                *field_name++ = '\0';
        } else {
                event_name = NULL;
                field_name = str;
        }

        opt_name = strchr(field_name, '/');
        if (opt_name)
                *opt_name++ = '\0';

        *event = event_name;
        *field = field_name;
        *opt = opt_name;

        return 0;
}

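/*
 * A dynamic sort key has the form [<event>.]<field>[/raw], e.g.
 * "sched:sched_switch.next_comm" or "next_pid/raw"; parse_field_name()
 * splits such a token in place before it is resolved below.
 */
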
/*
 * Find the matching evsel for a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
        struct evsel *evsel = NULL;
        struct evsel *pos;
        bool full_name;

        /* case 1 */
        if (event_name[0] == '%') {
                int nr = strtol(event_name+1, NULL, 0);

                if (nr > evlist->core.nr_entries)
                        return NULL;

                evsel = evlist__first(evlist);
                while (--nr > 0)
                        evsel = perf_evsel__next(evsel);

                return evsel;
        }

        full_name = !!strchr(event_name, ':');
        evlist__for_each_entry(evlist, pos) {
                /* case 2 */
                if (full_name && !strcmp(pos->name, event_name))
                        return pos;
                /* case 3 */
                if (!full_name && strstr(pos->name, event_name)) {
                        if (evsel) {
                                pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
                                         event_name, evsel->name, pos->name);
                                return NULL;
                        }
                        evsel = pos;
                }
        }

        return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
                                    struct tep_format_field *field,
                                    bool raw_trace, int level)
{
        struct hpp_dynamic_entry *hde;

        hde = __alloc_dynamic_entry(evsel, field, level);
        if (hde == NULL)
                return -ENOMEM;

        hde->raw_trace = raw_trace;

        perf_hpp__register_sort_field(&hde->hpp);
        return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
        int ret;
        struct tep_format_field *field;

        field = evsel->tp_format->format.fields;
        while (field) {
                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
                if (ret < 0)
                        return ret;

                field = field->next;
        }
        return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
                                  int level)
{
        int ret;
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
                        continue;

                ret = add_evsel_fields(evsel, raw_trace, level);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
                                   char *field_name, bool raw_trace, int level)
{
        int ret = -ESRCH;
        struct evsel *evsel;
        struct tep_format_field *field;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
                        continue;

                field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL)
                        continue;

                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
                if (ret < 0)
                        break;
        }
        return ret;
}

static int add_dynamic_entry(struct evlist *evlist, const char *tok,
                             int level)
{
        char *str, *event_name, *field_name, *opt_name;
        struct evsel *evsel;
        struct tep_format_field *field;
        bool raw_trace = symbol_conf.raw_trace;
        int ret = 0;

        if (evlist == NULL)
                return -ENOENT;

        str = strdup(tok);
        if (str == NULL)
                return -ENOMEM;

        if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
                ret = -EINVAL;
                goto out;
        }

        if (opt_name) {
                if (strcmp(opt_name, "raw")) {
                        pr_debug("unsupported field option %s\n", opt_name);
                        ret = -EINVAL;
                        goto out;
                }
                raw_trace = true;
        }

        if (!strcmp(field_name, "trace_fields")) {
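                /* "trace_fields" expands to every field of every tracepoint event */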
                ret = add_all_dynamic_fields(evlist, raw_trace, level);
                goto out;
        }

        if (event_name == NULL) {
                ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
                goto out;
        }

        evsel = find_evsel(evlist, event_name);
        if (evsel == NULL) {
                pr_debug("Cannot find event: %s\n", event_name);
                ret = -ENOENT;
                goto out;
        }

        if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
                pr_debug("%s is not a tracepoint event\n", event_name);
                ret = -EINVAL;
                goto out;
        }

        if (!strcmp(field_name, "*")) {
                ret = add_evsel_fields(evsel, raw_trace, level);
        } else {
                field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL) {
                        pr_debug("Cannot find event field for %s.%s\n",
                                 event_name, field_name);
                        ret = -ENOENT;
                        goto out;
                }

                ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
        }

out:
        free(str);
        return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
                                 struct perf_hpp_list *list,
                                 int level)
{
        if (sd->taken)
                return 0;

        if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
                return -1;

        if (sd->entry->se_collapse)
                list->need_collapse = 1;

        sd->taken = 1;

        return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
                                struct perf_hpp_list *list,
                                int level)
{
        struct perf_hpp_fmt *fmt;

        if (hd->taken)
                return 0;

        fmt = __hpp_dimension__alloc_hpp(hd, level);
        if (!fmt)
                return -1;

        hd->taken = 1;
        perf_hpp_list__register_sort_field(list, fmt);
        return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
                                        struct sort_dimension *sd)
{
        if (sd->taken)
                return 0;

        if (__sort_dimension__add_hpp_output(sd, list) < 0)
                return -1;

        sd->taken = 1;
        return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
                                       struct hpp_dimension *hd)
{
        struct perf_hpp_fmt *fmt;

        if (hd->taken)
                return 0;

        fmt = __hpp_dimension__alloc_hpp(hd, 0);
        if (!fmt)
                return -1;

        hd->taken = 1;
        perf_hpp_list__column_register(list, fmt);
        return 0;
}

int hpp_dimension__add_output(unsigned col)
{
        BUG_ON(col >= PERF_HPP__MAX_INDEX);
        return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
                        struct evlist *evlist,
                        int level)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
                struct sort_dimension *sd = &common_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sd->entry == &sort_parent) {
                        int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
                        if (ret) {
                                char err[BUFSIZ];

                                regerror(ret, &parent_regex, err, sizeof(err));
                                pr_err("Invalid regex: %s\n%s", parent_pattern, err);
                                return -EINVAL;
                        }
                        list->parent = 1;
                } else if (sd->entry == &sort_sym) {
                        list->sym = 1;
                        /*
                         * perf diff displays the performance difference
                         * amongst two or more perf.data files.  Those files
                         * could come from different binaries, so we should
                         * not compare their ips, but their symbol names.
                         */
                        if (sort__mode == SORT_MODE__DIFF)
                                sd->entry->se_collapse = sort__sym_sort;

                } else if (sd->entry == &sort_dso) {
                        list->dso = 1;
                } else if (sd->entry == &sort_socket) {
                        list->socket = 1;
                } else if (sd->entry == &sort_thread) {
                        list->thread = 1;
                } else if (sd->entry == &sort_comm) {
                        list->comm = 1;
                }

                return __sort_dimension__add(sd, list, level);
        }

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
                struct hpp_dimension *hd = &hpp_sort_dimensions[i];

                if (strncasecmp(tok, hd->name, strlen(tok)))
                        continue;

                return __hpp_dimension__add(hd, list, level);
        }

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
                struct sort_dimension *sd = &bstack_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sort__mode != SORT_MODE__BRANCH)
                        return -EINVAL;

                if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
                        list->sym = 1;

                __sort_dimension__add(sd, list, level);
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
                struct sort_dimension *sd = &memory_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sort__mode != SORT_MODE__MEMORY)
                        return -EINVAL;

                if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
                        return -EINVAL;

                if (sd->entry == &sort_mem_daddr_sym)
                        list->sym = 1;

                __sort_dimension__add(sd, list, level);
                return 0;
        }

        if (!add_dynamic_entry(evlist, tok, level))
                return 0;

        return -ESRCH;
}

/*
 * Parse a comma/space separated list of sort keys.  Keys wrapped in '{}'
 * share one hierarchy level; every other key starts a new level.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
                           struct evlist *evlist)
{
        char *tmp, *tok;
        int ret = 0;
        int level = 0;
        int next_level = 1;
        bool in_group = false;

        do {
                tok = str;
                tmp = strpbrk(str, "{}, ");
                if (tmp) {
                        if (in_group)
                                next_level = level;
                        else
                                next_level = level + 1;

                        if (*tmp == '{')
                                in_group = true;
                        else if (*tmp == '}')
                                in_group = false;

                        *tmp = '\0';
                        str = tmp + 1;
                }

                if (*tok) {
                        ret = sort_dimension__add(list, tok, evlist, level);
                        if (ret == -EINVAL) {
                                if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
                                        pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
                                else
                                        pr_err("Invalid --sort key: `%s'", tok);
                                break;
                        } else if (ret == -ESRCH) {
                                pr_err("Unknown --sort key: `%s'", tok);
                                break;
                        }
                }

                level = next_level;
        } while (tmp);

        return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
        const char *default_sort_orders[] = {
                default_sort_order,
                default_branch_sort_order,
                default_mem_sort_order,
                default_top_sort_order,
                default_diff_sort_order,
                default_tracepoint_sort_order,
        };
        bool use_trace = true;
        struct evsel *evsel;

        BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

        if (evlist == NULL || perf_evlist__empty(evlist))
                goto out_no_evlist;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
                        use_trace = false;
                        break;
                }
        }

        if (use_trace) {
                /* every event is a tracepoint: switch to the tracepoint sort mode */
                sort__mode = SORT_MODE__TRACEPOINT;
                if (symbol_conf.raw_trace)
                        return "trace_fields";
        }
out_no_evlist:
        return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
        char *new_sort_order;

        /*
         * Append a '+'-prefixed sort order to the default sort
         * order string.
         */
        if (!sort_order || is_strict_order(sort_order))
                return 0;

        if (sort_order[1] == '\0') {
                pr_err("Invalid --sort key: `+'");
                return -EINVAL;
        }

        /*
         * We allocate a new sort_order string, but we never free it,
         * because it is referenced throughout the rest of the code.
         */
        if (asprintf(&new_sort_order, "%s,%s",
                     get_default_sort_order(evlist), sort_order + 1) < 0) {
                pr_err("Not enough memory to set up --sort");
                return -ENOMEM;
        }

        sort_order = new_sort_order;
        return 0;
}

/*
 * Add a 'pre,' prefix to 'str' if 'pre' is not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
        char *n;

        if (!str || strstr(str, pre))
                return str;

        if (asprintf(&n, "%s,%s", pre, str) < 0)
                return NULL;

        free(str);
        return n;
}

static char *setup_overhead(char *keys)
{
        if (sort__mode == SORT_MODE__DIFF)
                return keys;

        keys = prefix_if_not_in("overhead", keys);

        if (symbol_conf.cumulate_callchain)
                keys = prefix_if_not_in("overhead_children", keys);

        return keys;
}

static int __setup_sorting(struct evlist *evlist)
{
        char *str;
        const char *sort_keys;
        int ret = 0;

        ret = setup_sort_order(evlist);
        if (ret)
                return ret;

        sort_keys = sort_order;
        if (sort_keys == NULL) {
                if (is_strict_order(field_order)) {
                        /*
                         * If the user specified a field order but no sort
                         * order, we'll honor it and not add the default
                         * sort orders.
                         */
                        return 0;
                }

                sort_keys = get_default_sort_order(evlist);
        }

        str = strdup(sort_keys);
        if (str == NULL) {
                pr_err("Not enough memory to setup sort keys");
                return -ENOMEM;
        }

        /*
         * Prepend overhead fields for backward compatibility,
         * e.g. '--sort comm,dso' is handled as '--sort overhead,comm,dso'
         * ('overhead_children' is also prepended when --children is used).
         */
        if (!is_strict_order(field_order)) {
                str = setup_overhead(str);
                if (str == NULL) {
                        pr_err("Not enough memory to setup overhead keys");
                        return -ENOMEM;
                }
        }

        ret = setup_sort_list(&perf_hpp_list, str, evlist);

        free(str);
        return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                if (hse->se->se_width_idx == idx) {
                        fmt->elide = elide;
                        break;
                }
        }
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
        if (list && strlist__nr_entries(list) == 1) {
                if (fp != NULL)
                        fprintf(fp, "# %s: %s\n", list_name,
                                strlist__entry(list, 0)->s);
                return true;
        }
        return false;
}

static bool get_elide(int idx, FILE *output)
{
        switch (idx) {
        case HISTC_SYMBOL:
                return __get_elide(symbol_conf.sym_list, "symbol", output);
        case HISTC_DSO:
                return __get_elide(symbol_conf.dso_list, "dso", output);
        case HISTC_COMM:
                return __get_elide(symbol_conf.comm_list, "comm", output);
        default:
                break;
        }

        if (sort__mode != SORT_MODE__BRANCH)
                return false;

        switch (idx) {
        case HISTC_SYMBOL_FROM:
                return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
        case HISTC_SYMBOL_TO:
                return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
        case HISTC_DSO_FROM:
                return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
        case HISTC_DSO_TO:
                return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
        default:
                break;
        }

        return false;
}

void sort__setup_elide(FILE *output)
{
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                hse = container_of(fmt, struct hpp_sort_entry, hpp);
                fmt->elide = get_elide(hse->se->se_width_idx, output);
        }

        /*
         * It makes no sense to elide all of the sort entries.
         * Just revert them so they show up again, e.g. '--comms bash
         * --sort comm' would otherwise end up with no visible sort
         * column at all.
         */
        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                if (!fmt->elide)
                        return;
        }

        perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;

                fmt->elide = false;
        }
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
                struct sort_dimension *sd = &common_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
                struct hpp_dimension *hd = &hpp_sort_dimensions[i];

                if (strncasecmp(tok, hd->name, strlen(tok)))
                        continue;

                return __hpp_dimension__add_output(list, hd);
        }

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
                struct sort_dimension *sd = &bstack_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
                struct sort_dimension *sd = &memory_sort_dimensions[i];

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                return __sort_dimension__add_output(list, sd);
        }

        return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
        char *tmp, *tok;
        int ret = 0;

        for (tok = strtok_r(str, ", ", &tmp);
             tok; tok = strtok_r(NULL, ", ", &tmp)) {
                ret = output_field_add(list, tok);
                if (ret == -EINVAL) {
                        ui__error("Invalid --fields key: `%s'", tok);
                        break;
                } else if (ret == -ESRCH) {
                        ui__error("Unknown --fields key: `%s'", tok);
                        break;
                }
        }

        return ret;
}

void reset_dimensions(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
                common_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
                hpp_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
                bstack_sort_dimensions[i].taken = 0;

        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
                memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
        return order && (*order != '+');
}

static int __setup_output_field(void)
{
        char *str, *strp;
        int ret = -EINVAL;

        if (field_order == NULL)
                return 0;

        strp = str = strdup(field_order);
        if (str == NULL) {
                pr_err("Not enough memory to setup output fields");
                return -ENOMEM;
        }

        if (!is_strict_order(field_order))
                strp++;

        if (!strlen(strp)) {
                pr_err("Invalid --fields key: `+'");
                goto out;
        }

        ret = setup_output_list(&perf_hpp_list, strp);

out:
        free(str);
        return ret;
}

int setup_sorting(struct evlist *evlist)
{
        int err;

        err = __setup_sorting(evlist);
        if (err < 0)
                return err;

        if (parent_pattern != default_parent_pattern) {
                err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
                if (err < 0)
                        return err;
        }

        reset_dimensions();

        /*
         * perf diff doesn't use default hpp output fields.
         */
        if (sort__mode != SORT_MODE__DIFF)
                perf_hpp__init();

        err = __setup_output_field();
        if (err < 0)
                return err;

        /* copy sort keys to output fields */
        perf_hpp__setup_output_field(&perf_hpp_list);
        /* and then copy output fields to sort keys */
        perf_hpp__append_sort_keys(&perf_hpp_list);

        /* setup hists-specific output fields */
        if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
                return -1;

        return 0;
}

void reset_output_field(void)
{
        perf_hpp_list.need_collapse = 0;
        perf_hpp_list.parent = 0;
        perf_hpp_list.sym = 0;
        perf_hpp_list.dso = 0;

        field_order = NULL;
        sort_order = NULL;

        reset_dimensions();
        perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
        if (*llen >= 75) {
                strbuf_addstr(sb, "\n\t\t\t ");
                *llen = INDENT;
        }
        strbuf_addf(sb, " %s", str);
        *llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
                            int *llen)
{
        int i;

        for (i = 0; i < n; i++)
                add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
                                int *llen)
{
        int i;

        for (i = 0; i < n; i++)
                add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
        struct strbuf sb;
        char *s;
        int len = strlen(prefix) + INDENT;

        strbuf_init(&sb, 300);
        strbuf_addstr(&sb, prefix);
        add_hpp_sort_string(&sb, hpp_sort_dimensions,
                            ARRAY_SIZE(hpp_sort_dimensions), &len);
        add_sort_string(&sb, common_sort_dimensions,
                        ARRAY_SIZE(common_sort_dimensions), &len);
        add_sort_string(&sb, bstack_sort_dimensions,
                        ARRAY_SIZE(bstack_sort_dimensions), &len);
        add_sort_string(&sb, memory_sort_dimensions,
                        ARRAY_SIZE(memory_sort_dimensions), &len);
        s = strbuf_detach(&sb, NULL);
        strbuf_release(&sb);
        return s;
}