// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
#include "time-utils.h"
#include <linux/kernel.h>
#include <linux/string.h>

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the character selected with the
 *
 *   -t, --field-separator
 *
 * option.  That option uses a special separator character and does not pad
 * with spaces, so every occurrence of the separator in symbol names (and
 * other output) is replaced with a '.' character, making '.' the only
 * character that cannot be used as a separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};
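/*
 * Note on the sort_entry callbacks, as they are used further down in this
 * file: ->se_cmp() orders entries while samples are being added,
 * ->se_collapse() and ->se_sort() fall back to ->se_cmp() when NULL (see
 * __sort__hpp_collapse() and __sort__hpp_sort()), and ->se_filter() returns
 * a negative value when the filter type does not apply, zero when the entry
 * matches the filter, and non-zero when it should be filtered out (see
 * hist_entry__filter()).
 */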
/* --sort comm */

/*
 * We can't use pointer comparison in functions below,
 * because it gives different results based on pointer
 * values, which could break some sorting assumptions.
 */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}
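/*
 * A note on the DSO names compared above and printed below: under -v the
 * long name (typically the full path, e.g. "/usr/lib/libc-2.31.so") is
 * used, otherwise the short name (usually just the basename).  The example
 * path here is illustrative only.
 */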
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}
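/*
 * A rough sketch of what _hist_entry__sym_snprintf() below produces (the
 * addresses and names are made up):
 *
 *   verbose:         "0x401130            d [.] main"
 *   default:         "[.] main"
 *   missing symbol:  "[.] 0x401134"
 *
 * where 'd' stands for whatever symtab origin character
 * dso__symtab_origin() reports and "[.]" is the cpumode level.
 */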
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__srcline(struct hist_entry *he)
{
	return map__srcline(he->ms.map, he->ip, he->ms.sym);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
{
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
}

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};
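/*
 * A worked example for hist_entry__sym_ipc_snprintf() below (the numbers
 * are made up): with hit_insn = 200, hit_cycles = 100, cover_insn = 45 and
 * total_insn = 50, ipc = 200 / 100 = 2.00 and coverage = 45 * 100 / 50 =
 * 90.0, so the column would read "2.00  [ 90.0%]" (modulo exact padding).
 */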
static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	struct symbol *sym = he->ms.sym;
	struct annotation *notes;
	double ipc = 0.0, coverage = 0.0;
	char tmp[64];

	if (!sym)
		return repsep_snprintf(bf, size, "%-*s", width, "-");

	notes = symbol__annotation(sym);

	if (notes->hit_cycles)
		ipc = notes->hit_insn / ((double)notes->hit_cycles);

	if (notes->total_insn) {
		coverage = notes->cover_insn * 100.0 /
			((double)notes->total_insn);
	}

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     __maybe_unused,
					     char *bf, size_t size,
					     unsigned int width)
{
	char tmp[64];

	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);
}

struct sort_entry sort_sym_ipc_null = {
	.se_header = "IPC [IPC Coverage]",
	.se_cmp = sort__sym_cmp,
	.se_snprintf = hist_entry__sym_ipc_null_snprintf,
	.se_width_idx = HISTC_SYMBOL_IPC,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}
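/*
 * For illustration: __get_srcline() above typically hands back a
 * "file:line" style string (something like "hists.c:1034", a hypothetical
 * example); chopping it at the first ':' leaves just the file name for the
 * "Source File" column.
 */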
static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width - 3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort time */

static int64_t
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->time - left->time;
}
static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	char he_time[32];

	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,
					  sizeof(he_time));
	else
		timestamp__scnprintf_usec(he->time, he_time,
					  sizeof(he_time));

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);
}

struct sort_entry sort_time = {
	.se_header = "Time",
	.se_cmp = sort__time_cmp,
	.se_snprintf = hist_entry__time_snprintf,
	.se_width_idx = HISTC_TIME,
};

/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       he->branch_info->from.ms.map->dso != dso);
}
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       he->branch_info->to.ms.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->addr, he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};
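/*
 * Note on the mispredict key handled below: sort__mispredict_cmp() only
 * distinguishes "same prediction flags" (0) from "different" (1), it does
 * not impose a total order, and hist_entry__mispredict_snprintf() renders
 * the flags as "N" (predicted), "Y" (mispredicted) or "N/A" when there is
 * no branch info.
 */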
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		ms = &he->mem_info->daddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}
static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		ms = &he->mem_info->iaddr.ms;
	}
	return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.ms.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.ms.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.ms.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
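/*
 * The locked/tlb/lvl/snoop comparators above and below all follow the same
 * pattern: copy the sample's perf_mem_data_src, or substitute the matching
 * *_NA value when there is no mem_info, and compare the relevant bit-field.
 * The perf_mem__*_scnprintf() helpers then decode those bit-fields into
 * human readable strings (the memory level typically comes out as something
 * like "L1 hit"; the exact strings live in mem-events.c).
 */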
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	int rc;

	if (!left->mem_info) return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	rc = dso__cmp_id(l_map->dso, r_map->dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->dso->id.maj && !l_map->dso->id.min &&
	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;

		addr = cl_address(he->mem_info->daddr.al_addr);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->dso->id.maj || map->dso->id.min ||
		     map->dso->id.ino || map->dso->id.ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};
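/*
 * A note on the weight sort keys below: he_weight() yields the average
 * weight per event (stat.weight / stat.nr_events) and backs the
 * "Local Weight" column, while "Weight" prints the accumulated stat.weight
 * directly.  With made-up numbers, weight = 300 over nr_events = 3 gives a
 * local weight of 100 and a global weight of 300.
 */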
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}
static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}
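/*
 * Illustrative output of hist_entry__transaction_snprintf() below (the
 * value is made up): a transaction word with PERF_TXN_TRANSACTION,
 * PERF_TXN_SYNC and PERF_TXN_CONFLICT set and an abort code of 3 is
 * rendered as "TX SYNC CON :3": the flag names in txbits[] order, then the
 * abort code in hex when any PERF_TXN_ABORT_MASK bits are set.
 */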
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};
/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header = "DSO size",
	.se_cmp = sort__dso_size_cmp,
	.se_snprintf = hist_entry__dso_size_snprintf,
	.se_width_idx = HISTC_DSO_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM
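/*
 * The .name strings in the tables above are what the sort/field options
 * accept on the command line.  As an expansion example, in the branch-stack
 * table
 *
 *   DIM(SORT_DSO_FROM, "dso_from", sort_dso_from)
 *
 * becomes
 *
 *   [SORT_DSO_FROM - __SORT_BRANCH_STACK] =
 *           { .name = "dso_from", .entry = &(sort_dso_from) }
 *
 * so each table is indexed within its own sort key range.
 */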
struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
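/*
 * The MK_SORT_ENTRY_CHK() macro below generates one predicate per sort key.
 * For example, MK_SORT_ENTRY_CHK(comm) expands to a perf_hpp__is_comm_entry()
 * function that checks whether an hpp format is a sort entry and, if so,
 * whether its underlying sort_entry is &sort_comm.
 */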
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}
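/*
 * A hypothetical example for hist_entry__filter() below: with "dso,symbol"
 * sort keys and a DSO filter being applied, hist_entry__dso_filter()
 * contributes a result (its type matches) while hist_entry__sym_filter()
 * returns -1 and is skipped; the entry ends up filtered out iff a
 * contributing callback returns non-zero.
 */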
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered if any sort key in the hpp list
		 * applies the filter; filter types that do not match a given
		 * sort key are skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}
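/*
 * Both update_dynamic_len() above and __sort__hde_entry() below scan the
 * pretty-printed trace output for space separated "name=value" tokens.
 * For instance (illustrative output), with trace_output
 * "prev_comm=swapper/0 prev_pid=0 next_pid=1234" and a field named
 * "prev_pid", the extracted value is "0"; skipping namelen + 1 characters
 * steps over the "prev_pid=" prefix.
 */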
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}
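/*
 * Compare two entries by the raw data of the dynamic field.  Calling it
 * with b == NULL only updates the maximum column width for @a (see
 * update_dynamic_len()).  For dynamic (__data_loc) fields the number
 * stored in the record is a descriptor: the data offset in the low 16
 * bits and its length in the high 16 bits.
 */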
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record the max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	if (new_fmt == NULL)
		return NULL;

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}
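/*
 * A dynamic sort key selects a tracepoint field.  Accepted forms
 * (illustrative examples, see add_dynamic_entry() below):
 *
 *   trace_fields                      all fields of all tracepoint events
 *   <field>,   e.g. next_pid          a field looked up in every event
 *   <event>.<field>, e.g. sched:sched_switch.prev_comm
 *   <event>.*                         all fields of one event
 *   .../raw                           show the raw value, not pretty-printed
 */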
/*
 * Find a matching evsel for a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. a full event name (e.g. sched:sched_switch)
 *   3. a partial event name (must not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
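/*
 * Parse 'tok' as a dynamic sort key and register the corresponding
 * entry (or entries).  Returns 0 on success, -ENOENT or -ESRCH if the
 * event or field cannot be found, -EINVAL for an unsupported field
 * option or a non-tracepoint event, and -ENOMEM on allocation failure.
 */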
static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	struct tep_format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
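/*
 * Add a single sort key.  The token is looked up, in order, in the
 * common sort dimensions, the hpp output dimensions (overhead, period,
 * etc.), the branch-stack dimensions, the memory dimensions and finally
 * the dynamic tracepoint fields.  Returns -EINVAL for a key that is not
 * valid in the current sort mode and -ESRCH for an unknown key.
 */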
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference
			 * between two or more perf.data files.  Those files
			 * could come from different binaries, so we should
			 * not compare their IPs but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
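/*
 * Split the sort order string into keys.  Keys are separated by ',' or
 * ' ', and each key normally starts a new hierarchy level; keys grouped
 * with '{' and '}' share one level, e.g. (illustrative)
 * --sort '{comm,dso},sym' puts comm and dso on the same level.
 */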
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it is referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Add a 'pre,' prefix to 'str' if 'pre' is not already part of it.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
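/*
 * Build the sort key list: resolve a '+'-prefixed --sort value by
 * appending it to the mode's default order (e.g. '--sort +srcline'
 * keeps the default keys and adds srcline), fall back to the default
 * order when none was given, prepend the overhead keys unless a strict
 * --fields order is in effect, and finally parse the resulting string.
 */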
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, honor it and do not add the default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend the overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
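/*
 * Elide columns whose filter list contains exactly one entry: every row
 * would show the same value, so note it in the output header instead.
 * If that would elide every sort column, keep them all visible.
 */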
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries;
	 * just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
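/*
 * Entry point for setting up sorting and output fields: parse the sort
 * keys, add the 'parent' key when a custom parent pattern was given,
 * parse --fields, then copy sort keys to the output field list and
 * output fields back to the sort key list so both stay consistent.
 */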
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}