// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include <linux/mman.h>
#include <linux/time64.h>
#include "debug.h"
#include "dso.h"
#include "sort.h"
#include "hist.h"
#include "cacheline.h"
#include "comm.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "map_symbol.h"
#include "branch.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "srcline.h"
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
#include "annotate.h"
#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
#include "trace-event.h"
#include <linux/kernel.h>
#include <linux/string.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};

/*
 * Some architectures have Adjacent Cacheline Prefetch feature, which
 * behaves like the cacheline size is doubled. Enable this flag to
 * check things in double cacheline granularity.
 */
bool chk_double_cl;

/*
 * Replaces all occurrences of the character selected with the:
 *
 * -t, --field-separator
 *
 * option. That option uses a special separator character and does not pad
 * with spaces, so every occurrence of the separator in symbol names (and
 * other output) is replaced with a '.' character, making the separator the
 * only character that cannot appear in a field.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
73 { 74 int n; 75 va_list ap; 76 77 va_start(ap, fmt); 78 n = vsnprintf(bf, size, fmt, ap); 79 if (symbol_conf.field_sep && n > 0) { 80 char *sep = bf; 81 82 while (1) { 83 sep = strchr(sep, *symbol_conf.field_sep); 84 if (sep == NULL) 85 break; 86 *sep = '.'; 87 } 88 } 89 va_end(ap); 90 91 if (n >= (int)size) 92 return size - 1; 93 return n; 94 } 95 96 static int64_t cmp_null(const void *l, const void *r) 97 { 98 if (!l && !r) 99 return 0; 100 else if (!l) 101 return -1; 102 else 103 return 1; 104 } 105 106 /* --sort pid */ 107 108 static int64_t 109 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 110 { 111 return right->thread->tid - left->thread->tid; 112 } 113 114 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 115 size_t size, unsigned int width) 116 { 117 const char *comm = thread__comm_str(he->thread); 118 119 width = max(7U, width) - 8; 120 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 121 width, width, comm ?: ""); 122 } 123 124 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 125 { 126 const struct thread *th = arg; 127 128 if (type != HIST_FILTER__THREAD) 129 return -1; 130 131 return th && he->thread != th; 132 } 133 134 struct sort_entry sort_thread = { 135 .se_header = " Pid:Command", 136 .se_cmp = sort__thread_cmp, 137 .se_snprintf = hist_entry__thread_snprintf, 138 .se_filter = hist_entry__thread_filter, 139 .se_width_idx = HISTC_THREAD, 140 }; 141 142 /* --sort simd */ 143 144 static int64_t 145 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right) 146 { 147 if (left->simd_flags.arch != right->simd_flags.arch) 148 return (int64_t) left->simd_flags.arch - right->simd_flags.arch; 149 150 return (int64_t) left->simd_flags.pred - right->simd_flags.pred; 151 } 152 153 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags) 154 { 155 u64 arch = simd_flags->arch; 156 157 if (arch & SIMD_OP_FLAGS_ARCH_SVE) 158 return "SVE"; 159 else 160 return "n/a"; 161 } 162 163 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf, 164 size_t size, unsigned int width __maybe_unused) 165 { 166 const char *name; 167 168 if (!he->simd_flags.arch) 169 return repsep_snprintf(bf, size, ""); 170 171 name = hist_entry__get_simd_name(&he->simd_flags); 172 173 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY) 174 return repsep_snprintf(bf, size, "[e] %s", name); 175 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL) 176 return repsep_snprintf(bf, size, "[p] %s", name); 177 178 return repsep_snprintf(bf, size, "[.] %s", name); 179 } 180 181 struct sort_entry sort_simd = { 182 .se_header = "Simd ", 183 .se_cmp = sort__simd_cmp, 184 .se_snprintf = hist_entry__simd_snprintf, 185 .se_width_idx = HISTC_SIMD, 186 }; 187 188 /* --sort comm */ 189 190 /* 191 * We can't use pointer comparison in functions below, 192 * because it gives different results based on pointer 193 * values, which could break some sorting assumptions. 
194 */ 195 static int64_t 196 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 197 { 198 return strcmp(comm__str(right->comm), comm__str(left->comm)); 199 } 200 201 static int64_t 202 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 203 { 204 return strcmp(comm__str(right->comm), comm__str(left->comm)); 205 } 206 207 static int64_t 208 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 209 { 210 return strcmp(comm__str(right->comm), comm__str(left->comm)); 211 } 212 213 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 214 size_t size, unsigned int width) 215 { 216 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 217 } 218 219 struct sort_entry sort_comm = { 220 .se_header = "Command", 221 .se_cmp = sort__comm_cmp, 222 .se_collapse = sort__comm_collapse, 223 .se_sort = sort__comm_sort, 224 .se_snprintf = hist_entry__comm_snprintf, 225 .se_filter = hist_entry__thread_filter, 226 .se_width_idx = HISTC_COMM, 227 }; 228 229 /* --sort dso */ 230 231 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 232 { 233 struct dso *dso_l = map_l ? map__dso(map_l) : NULL; 234 struct dso *dso_r = map_r ? map__dso(map_r) : NULL; 235 const char *dso_name_l, *dso_name_r; 236 237 if (!dso_l || !dso_r) 238 return cmp_null(dso_r, dso_l); 239 240 if (verbose > 0) { 241 dso_name_l = dso_l->long_name; 242 dso_name_r = dso_r->long_name; 243 } else { 244 dso_name_l = dso_l->short_name; 245 dso_name_r = dso_r->short_name; 246 } 247 248 return strcmp(dso_name_l, dso_name_r); 249 } 250 251 static int64_t 252 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 253 { 254 return _sort__dso_cmp(right->ms.map, left->ms.map); 255 } 256 257 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 258 size_t size, unsigned int width) 259 { 260 const struct dso *dso = map ? map__dso(map) : NULL; 261 const char *dso_name = "[unknown]"; 262 263 if (dso) 264 dso_name = verbose > 0 ? 
					 dso->long_name : dso->short_name;

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

		if (ret)
			return ret;
		if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
			return 0;
	}

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		struct dso *dso = map ? map__dso(map) : NULL;
		char o = dso ?
dso__symtab_origin(dso) : '!'; 364 u64 rip = ip; 365 366 if (dso && dso->kernel && dso->adjust_symbols) 367 rip = map__unmap_ip(map, ip); 368 369 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 370 BITS_PER_LONG / 4 + 2, rip, o); 371 } 372 373 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 374 if (sym && map) { 375 if (sym->type == STT_OBJECT) { 376 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 377 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 378 ip - map__unmap_ip(map, sym->start)); 379 } else { 380 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 381 width - ret, 382 sym->name); 383 if (sym->inlined) 384 ret += repsep_snprintf(bf + ret, size - ret, 385 " (inlined)"); 386 } 387 } else { 388 size_t len = BITS_PER_LONG / 4; 389 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 390 len, ip); 391 } 392 393 return ret; 394 } 395 396 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 397 { 398 return _hist_entry__sym_snprintf(&he->ms, he->ip, 399 he->level, bf, size, width); 400 } 401 402 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 403 { 404 const char *sym = arg; 405 406 if (type != HIST_FILTER__SYMBOL) 407 return -1; 408 409 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 410 } 411 412 struct sort_entry sort_sym = { 413 .se_header = "Symbol", 414 .se_cmp = sort__sym_cmp, 415 .se_sort = sort__sym_sort, 416 .se_snprintf = hist_entry__sym_snprintf, 417 .se_filter = hist_entry__sym_filter, 418 .se_width_idx = HISTC_SYMBOL, 419 }; 420 421 /* --sort srcline */ 422 423 char *hist_entry__srcline(struct hist_entry *he) 424 { 425 return map__srcline(he->ms.map, he->ip, he->ms.sym); 426 } 427 428 static int64_t 429 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 430 { 431 int64_t ret; 432 433 ret = _sort__addr_cmp(left->ip, right->ip); 434 if (ret) 435 return ret; 436 437 return sort__dso_cmp(left, right); 438 } 439 440 static int64_t 441 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right) 442 { 443 if (!left->srcline) 444 left->srcline = hist_entry__srcline(left); 445 if (!right->srcline) 446 right->srcline = hist_entry__srcline(right); 447 448 return strcmp(right->srcline, left->srcline); 449 } 450 451 static int64_t 452 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right) 453 { 454 return sort__srcline_collapse(left, right); 455 } 456 457 static void 458 sort__srcline_init(struct hist_entry *he) 459 { 460 if (!he->srcline) 461 he->srcline = hist_entry__srcline(he); 462 } 463 464 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 465 size_t size, unsigned int width) 466 { 467 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 468 } 469 470 struct sort_entry sort_srcline = { 471 .se_header = "Source:Line", 472 .se_cmp = sort__srcline_cmp, 473 .se_collapse = sort__srcline_collapse, 474 .se_sort = sort__srcline_sort, 475 .se_init = sort__srcline_init, 476 .se_snprintf = hist_entry__srcline_snprintf, 477 .se_width_idx = HISTC_SRCLINE, 478 }; 479 480 /* --sort srcline_from */ 481 482 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 483 { 484 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym); 485 } 486 487 static int64_t 488 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 489 { 490 return left->branch_info->from.addr - right->branch_info->from.addr; 491 } 492 493 static int64_t 494 
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right) 495 { 496 if (!left->branch_info->srcline_from) 497 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 498 499 if (!right->branch_info->srcline_from) 500 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 501 502 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 503 } 504 505 static int64_t 506 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right) 507 { 508 return sort__srcline_from_collapse(left, right); 509 } 510 511 static void sort__srcline_from_init(struct hist_entry *he) 512 { 513 if (!he->branch_info->srcline_from) 514 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from); 515 } 516 517 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 518 size_t size, unsigned int width) 519 { 520 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 521 } 522 523 struct sort_entry sort_srcline_from = { 524 .se_header = "From Source:Line", 525 .se_cmp = sort__srcline_from_cmp, 526 .se_collapse = sort__srcline_from_collapse, 527 .se_sort = sort__srcline_from_sort, 528 .se_init = sort__srcline_from_init, 529 .se_snprintf = hist_entry__srcline_from_snprintf, 530 .se_width_idx = HISTC_SRCLINE_FROM, 531 }; 532 533 /* --sort srcline_to */ 534 535 static int64_t 536 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 537 { 538 return left->branch_info->to.addr - right->branch_info->to.addr; 539 } 540 541 static int64_t 542 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right) 543 { 544 if (!left->branch_info->srcline_to) 545 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 546 547 if (!right->branch_info->srcline_to) 548 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 549 550 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 551 } 552 553 static int64_t 554 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right) 555 { 556 return sort__srcline_to_collapse(left, right); 557 } 558 559 static void sort__srcline_to_init(struct hist_entry *he) 560 { 561 if (!he->branch_info->srcline_to) 562 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to); 563 } 564 565 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 566 size_t size, unsigned int width) 567 { 568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 569 } 570 571 struct sort_entry sort_srcline_to = { 572 .se_header = "To Source:Line", 573 .se_cmp = sort__srcline_to_cmp, 574 .se_collapse = sort__srcline_to_collapse, 575 .se_sort = sort__srcline_to_sort, 576 .se_init = sort__srcline_to_init, 577 .se_snprintf = hist_entry__srcline_to_snprintf, 578 .se_width_idx = HISTC_SRCLINE_TO, 579 }; 580 581 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 582 size_t size, unsigned int width) 583 { 584 585 struct symbol *sym = he->ms.sym; 586 struct annotation *notes; 587 double ipc = 0.0, coverage = 0.0; 588 char tmp[64]; 589 590 if (!sym) 591 return repsep_snprintf(bf, size, "%-*s", width, "-"); 592 593 notes = symbol__annotation(sym); 594 595 if (notes->hit_cycles) 596 ipc = notes->hit_insn / ((double)notes->hit_cycles); 597 598 if (notes->total_insn) { 599 coverage = notes->cover_insn * 100.0 / 600 
((double)notes->total_insn); 601 } 602 603 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 604 return repsep_snprintf(bf, size, "%-*s", width, tmp); 605 } 606 607 struct sort_entry sort_sym_ipc = { 608 .se_header = "IPC [IPC Coverage]", 609 .se_cmp = sort__sym_cmp, 610 .se_snprintf = hist_entry__sym_ipc_snprintf, 611 .se_width_idx = HISTC_SYMBOL_IPC, 612 }; 613 614 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 615 __maybe_unused, 616 char *bf, size_t size, 617 unsigned int width) 618 { 619 char tmp[64]; 620 621 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 622 return repsep_snprintf(bf, size, "%-*s", width, tmp); 623 } 624 625 struct sort_entry sort_sym_ipc_null = { 626 .se_header = "IPC [IPC Coverage]", 627 .se_cmp = sort__sym_cmp, 628 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 629 .se_width_idx = HISTC_SYMBOL_IPC, 630 }; 631 632 /* --sort srcfile */ 633 634 static char no_srcfile[1]; 635 636 static char *hist_entry__get_srcfile(struct hist_entry *e) 637 { 638 char *sf, *p; 639 struct map *map = e->ms.map; 640 641 if (!map) 642 return no_srcfile; 643 644 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip), 645 e->ms.sym, false, true, true, e->ip); 646 if (!strcmp(sf, SRCLINE_UNKNOWN)) 647 return no_srcfile; 648 p = strchr(sf, ':'); 649 if (p && *sf) { 650 *p = 0; 651 return sf; 652 } 653 free(sf); 654 return no_srcfile; 655 } 656 657 static int64_t 658 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 659 { 660 return sort__srcline_cmp(left, right); 661 } 662 663 static int64_t 664 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right) 665 { 666 if (!left->srcfile) 667 left->srcfile = hist_entry__get_srcfile(left); 668 if (!right->srcfile) 669 right->srcfile = hist_entry__get_srcfile(right); 670 671 return strcmp(right->srcfile, left->srcfile); 672 } 673 674 static int64_t 675 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right) 676 { 677 return sort__srcfile_collapse(left, right); 678 } 679 680 static void sort__srcfile_init(struct hist_entry *he) 681 { 682 if (!he->srcfile) 683 he->srcfile = hist_entry__get_srcfile(he); 684 } 685 686 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 687 size_t size, unsigned int width) 688 { 689 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 690 } 691 692 struct sort_entry sort_srcfile = { 693 .se_header = "Source File", 694 .se_cmp = sort__srcfile_cmp, 695 .se_collapse = sort__srcfile_collapse, 696 .se_sort = sort__srcfile_sort, 697 .se_init = sort__srcfile_init, 698 .se_snprintf = hist_entry__srcfile_snprintf, 699 .se_width_idx = HISTC_SRCFILE, 700 }; 701 702 /* --sort parent */ 703 704 static int64_t 705 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 706 { 707 struct symbol *sym_l = left->parent; 708 struct symbol *sym_r = right->parent; 709 710 if (!sym_l || !sym_r) 711 return cmp_null(sym_l, sym_r); 712 713 return strcmp(sym_r->name, sym_l->name); 714 } 715 716 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 717 size_t size, unsigned int width) 718 { 719 return repsep_snprintf(bf, size, "%-*.*s", width, width, 720 he->parent ? 
he->parent->name : "[other]"); 721 } 722 723 struct sort_entry sort_parent = { 724 .se_header = "Parent symbol", 725 .se_cmp = sort__parent_cmp, 726 .se_snprintf = hist_entry__parent_snprintf, 727 .se_width_idx = HISTC_PARENT, 728 }; 729 730 /* --sort cpu */ 731 732 static int64_t 733 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 734 { 735 return right->cpu - left->cpu; 736 } 737 738 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 739 size_t size, unsigned int width) 740 { 741 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 742 } 743 744 struct sort_entry sort_cpu = { 745 .se_header = "CPU", 746 .se_cmp = sort__cpu_cmp, 747 .se_snprintf = hist_entry__cpu_snprintf, 748 .se_width_idx = HISTC_CPU, 749 }; 750 751 /* --sort cgroup_id */ 752 753 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 754 { 755 return (int64_t)(right_dev - left_dev); 756 } 757 758 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 759 { 760 return (int64_t)(right_ino - left_ino); 761 } 762 763 static int64_t 764 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 765 { 766 int64_t ret; 767 768 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 769 if (ret != 0) 770 return ret; 771 772 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 773 left->cgroup_id.ino); 774 } 775 776 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 777 char *bf, size_t size, 778 unsigned int width __maybe_unused) 779 { 780 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 781 he->cgroup_id.ino); 782 } 783 784 struct sort_entry sort_cgroup_id = { 785 .se_header = "cgroup id (dev/inode)", 786 .se_cmp = sort__cgroup_id_cmp, 787 .se_snprintf = hist_entry__cgroup_id_snprintf, 788 .se_width_idx = HISTC_CGROUP_ID, 789 }; 790 791 /* --sort cgroup */ 792 793 static int64_t 794 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right) 795 { 796 return right->cgroup - left->cgroup; 797 } 798 799 static int hist_entry__cgroup_snprintf(struct hist_entry *he, 800 char *bf, size_t size, 801 unsigned int width __maybe_unused) 802 { 803 const char *cgrp_name = "N/A"; 804 805 if (he->cgroup) { 806 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env, 807 he->cgroup); 808 if (cgrp != NULL) 809 cgrp_name = cgrp->name; 810 else 811 cgrp_name = "unknown"; 812 } 813 814 return repsep_snprintf(bf, size, "%s", cgrp_name); 815 } 816 817 struct sort_entry sort_cgroup = { 818 .se_header = "Cgroup", 819 .se_cmp = sort__cgroup_cmp, 820 .se_snprintf = hist_entry__cgroup_snprintf, 821 .se_width_idx = HISTC_CGROUP, 822 }; 823 824 /* --sort socket */ 825 826 static int64_t 827 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 828 { 829 return right->socket - left->socket; 830 } 831 832 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 833 size_t size, unsigned int width) 834 { 835 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 836 } 837 838 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 839 { 840 int sk = *(const int *)arg; 841 842 if (type != HIST_FILTER__SOCKET) 843 return -1; 844 845 return sk >= 0 && he->socket != sk; 846 } 847 848 struct sort_entry sort_socket = { 849 .se_header = "Socket", 850 .se_cmp = sort__socket_cmp, 851 .se_snprintf = hist_entry__socket_snprintf, 852 .se_filter = hist_entry__socket_filter, 853 .se_width_idx = HISTC_SOCKET, 854 }; 855 856 /* --sort time */ 857 858 
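/*
 * Order entries by the sample timestamp kept in hist_entry::time; the
 * column is printed in microseconds, or in nanoseconds when
 * symbol_conf.nanosecs is set.
 */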
static int64_t 859 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 860 { 861 return right->time - left->time; 862 } 863 864 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf, 865 size_t size, unsigned int width) 866 { 867 char he_time[32]; 868 869 if (symbol_conf.nanosecs) 870 timestamp__scnprintf_nsec(he->time, he_time, 871 sizeof(he_time)); 872 else 873 timestamp__scnprintf_usec(he->time, he_time, 874 sizeof(he_time)); 875 876 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 877 } 878 879 struct sort_entry sort_time = { 880 .se_header = "Time", 881 .se_cmp = sort__time_cmp, 882 .se_snprintf = hist_entry__time_snprintf, 883 .se_width_idx = HISTC_TIME, 884 }; 885 886 /* --sort trace */ 887 888 #ifdef HAVE_LIBTRACEEVENT 889 static char *get_trace_output(struct hist_entry *he) 890 { 891 struct trace_seq seq; 892 struct evsel *evsel; 893 struct tep_record rec = { 894 .data = he->raw_data, 895 .size = he->raw_size, 896 }; 897 898 evsel = hists_to_evsel(he->hists); 899 900 trace_seq_init(&seq); 901 if (symbol_conf.raw_trace) { 902 tep_print_fields(&seq, he->raw_data, he->raw_size, 903 evsel->tp_format); 904 } else { 905 tep_print_event(evsel->tp_format->tep, 906 &seq, &rec, "%s", TEP_PRINT_INFO); 907 } 908 /* 909 * Trim the buffer, it starts at 4KB and we're not going to 910 * add anything more to this buffer. 911 */ 912 return realloc(seq.buffer, seq.len + 1); 913 } 914 915 static int64_t 916 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 917 { 918 struct evsel *evsel; 919 920 evsel = hists_to_evsel(left->hists); 921 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 922 return 0; 923 924 if (left->trace_output == NULL) 925 left->trace_output = get_trace_output(left); 926 if (right->trace_output == NULL) 927 right->trace_output = get_trace_output(right); 928 929 return strcmp(right->trace_output, left->trace_output); 930 } 931 932 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 933 size_t size, unsigned int width) 934 { 935 struct evsel *evsel; 936 937 evsel = hists_to_evsel(he->hists); 938 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 939 return scnprintf(bf, size, "%-.*s", width, "N/A"); 940 941 if (he->trace_output == NULL) 942 he->trace_output = get_trace_output(he); 943 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 944 } 945 946 struct sort_entry sort_trace = { 947 .se_header = "Trace output", 948 .se_cmp = sort__trace_cmp, 949 .se_snprintf = hist_entry__trace_snprintf, 950 .se_width_idx = HISTC_TRACE, 951 }; 952 #endif /* HAVE_LIBTRACEEVENT */ 953 954 /* sort keys for branch stacks */ 955 956 static int64_t 957 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 958 { 959 if (!left->branch_info || !right->branch_info) 960 return cmp_null(left->branch_info, right->branch_info); 961 962 return _sort__dso_cmp(left->branch_info->from.ms.map, 963 right->branch_info->from.ms.map); 964 } 965 966 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 967 size_t size, unsigned int width) 968 { 969 if (he->branch_info) 970 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map, 971 bf, size, width); 972 else 973 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 974 } 975 976 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 977 const void *arg) 978 { 979 const struct dso *dso = arg; 980 981 if (type != HIST_FILTER__DSO) 982 return -1; 983 984 return dso && (!he->branch_info || 
!he->branch_info->from.ms.map || 985 map__dso(he->branch_info->from.ms.map) != dso); 986 } 987 988 static int64_t 989 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 990 { 991 if (!left->branch_info || !right->branch_info) 992 return cmp_null(left->branch_info, right->branch_info); 993 994 return _sort__dso_cmp(left->branch_info->to.ms.map, 995 right->branch_info->to.ms.map); 996 } 997 998 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 999 size_t size, unsigned int width) 1000 { 1001 if (he->branch_info) 1002 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map, 1003 bf, size, width); 1004 else 1005 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1006 } 1007 1008 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 1009 const void *arg) 1010 { 1011 const struct dso *dso = arg; 1012 1013 if (type != HIST_FILTER__DSO) 1014 return -1; 1015 1016 return dso && (!he->branch_info || !he->branch_info->to.ms.map || 1017 map__dso(he->branch_info->to.ms.map) != dso); 1018 } 1019 1020 static int64_t 1021 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 1022 { 1023 struct addr_map_symbol *from_l, *from_r; 1024 1025 if (!left->branch_info || !right->branch_info) 1026 return cmp_null(left->branch_info, right->branch_info); 1027 1028 from_l = &left->branch_info->from; 1029 from_r = &right->branch_info->from; 1030 1031 if (!from_l->ms.sym && !from_r->ms.sym) 1032 return _sort__addr_cmp(from_l->addr, from_r->addr); 1033 1034 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym); 1035 } 1036 1037 static int64_t 1038 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 1039 { 1040 struct addr_map_symbol *to_l, *to_r; 1041 1042 if (!left->branch_info || !right->branch_info) 1043 return cmp_null(left->branch_info, right->branch_info); 1044 1045 to_l = &left->branch_info->to; 1046 to_r = &right->branch_info->to; 1047 1048 if (!to_l->ms.sym && !to_r->ms.sym) 1049 return _sort__addr_cmp(to_l->addr, to_r->addr); 1050 1051 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym); 1052 } 1053 1054 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 1055 size_t size, unsigned int width) 1056 { 1057 if (he->branch_info) { 1058 struct addr_map_symbol *from = &he->branch_info->from; 1059 1060 return _hist_entry__sym_snprintf(&from->ms, from->al_addr, 1061 from->al_level, bf, size, width); 1062 } 1063 1064 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1065 } 1066 1067 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 1068 size_t size, unsigned int width) 1069 { 1070 if (he->branch_info) { 1071 struct addr_map_symbol *to = &he->branch_info->to; 1072 1073 return _hist_entry__sym_snprintf(&to->ms, to->al_addr, 1074 to->al_level, bf, size, width); 1075 } 1076 1077 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1078 } 1079 1080 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 1081 const void *arg) 1082 { 1083 const char *sym = arg; 1084 1085 if (type != HIST_FILTER__SYMBOL) 1086 return -1; 1087 1088 return sym && !(he->branch_info && he->branch_info->from.ms.sym && 1089 strstr(he->branch_info->from.ms.sym->name, sym)); 1090 } 1091 1092 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 1093 const void *arg) 1094 { 1095 const char *sym = arg; 1096 1097 if (type != HIST_FILTER__SYMBOL) 1098 return -1; 1099 1100 return sym && !(he->branch_info && he->branch_info->to.ms.sym && 1101 
strstr(he->branch_info->to.ms.sym->name, sym)); 1102 } 1103 1104 struct sort_entry sort_dso_from = { 1105 .se_header = "Source Shared Object", 1106 .se_cmp = sort__dso_from_cmp, 1107 .se_snprintf = hist_entry__dso_from_snprintf, 1108 .se_filter = hist_entry__dso_from_filter, 1109 .se_width_idx = HISTC_DSO_FROM, 1110 }; 1111 1112 struct sort_entry sort_dso_to = { 1113 .se_header = "Target Shared Object", 1114 .se_cmp = sort__dso_to_cmp, 1115 .se_snprintf = hist_entry__dso_to_snprintf, 1116 .se_filter = hist_entry__dso_to_filter, 1117 .se_width_idx = HISTC_DSO_TO, 1118 }; 1119 1120 struct sort_entry sort_sym_from = { 1121 .se_header = "Source Symbol", 1122 .se_cmp = sort__sym_from_cmp, 1123 .se_snprintf = hist_entry__sym_from_snprintf, 1124 .se_filter = hist_entry__sym_from_filter, 1125 .se_width_idx = HISTC_SYMBOL_FROM, 1126 }; 1127 1128 struct sort_entry sort_sym_to = { 1129 .se_header = "Target Symbol", 1130 .se_cmp = sort__sym_to_cmp, 1131 .se_snprintf = hist_entry__sym_to_snprintf, 1132 .se_filter = hist_entry__sym_to_filter, 1133 .se_width_idx = HISTC_SYMBOL_TO, 1134 }; 1135 1136 static int _hist_entry__addr_snprintf(struct map_symbol *ms, 1137 u64 ip, char level, char *bf, size_t size, 1138 unsigned int width) 1139 { 1140 struct symbol *sym = ms->sym; 1141 struct map *map = ms->map; 1142 size_t ret = 0, offs; 1143 1144 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 1145 if (sym && map) { 1146 if (sym->type == STT_OBJECT) { 1147 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 1148 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 1149 ip - map__unmap_ip(map, sym->start)); 1150 } else { 1151 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 1152 width - ret, 1153 sym->name); 1154 offs = ip - sym->start; 1155 if (offs) 1156 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs); 1157 } 1158 } else { 1159 size_t len = BITS_PER_LONG / 4; 1160 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 1161 len, ip); 1162 } 1163 1164 return ret; 1165 } 1166 1167 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf, 1168 size_t size, unsigned int width) 1169 { 1170 if (he->branch_info) { 1171 struct addr_map_symbol *from = &he->branch_info->from; 1172 1173 return _hist_entry__addr_snprintf(&from->ms, from->al_addr, 1174 he->level, bf, size, width); 1175 } 1176 1177 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1178 } 1179 1180 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf, 1181 size_t size, unsigned int width) 1182 { 1183 if (he->branch_info) { 1184 struct addr_map_symbol *to = &he->branch_info->to; 1185 1186 return _hist_entry__addr_snprintf(&to->ms, to->al_addr, 1187 he->level, bf, size, width); 1188 } 1189 1190 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 1191 } 1192 1193 static int64_t 1194 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right) 1195 { 1196 struct addr_map_symbol *from_l; 1197 struct addr_map_symbol *from_r; 1198 int64_t ret; 1199 1200 if (!left->branch_info || !right->branch_info) 1201 return cmp_null(left->branch_info, right->branch_info); 1202 1203 from_l = &left->branch_info->from; 1204 from_r = &right->branch_info->from; 1205 1206 /* 1207 * comparing symbol address alone is not enough since it's a 1208 * relative address within a dso. 
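	 * Compare the branch source DSOs first and fall back to the raw
	 * addresses only when the maps compare equal.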
1209 */ 1210 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map); 1211 if (ret != 0) 1212 return ret; 1213 1214 return _sort__addr_cmp(from_l->addr, from_r->addr); 1215 } 1216 1217 static int64_t 1218 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right) 1219 { 1220 struct addr_map_symbol *to_l; 1221 struct addr_map_symbol *to_r; 1222 int64_t ret; 1223 1224 if (!left->branch_info || !right->branch_info) 1225 return cmp_null(left->branch_info, right->branch_info); 1226 1227 to_l = &left->branch_info->to; 1228 to_r = &right->branch_info->to; 1229 1230 /* 1231 * comparing symbol address alone is not enough since it's a 1232 * relative address within a dso. 1233 */ 1234 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map); 1235 if (ret != 0) 1236 return ret; 1237 1238 return _sort__addr_cmp(to_l->addr, to_r->addr); 1239 } 1240 1241 struct sort_entry sort_addr_from = { 1242 .se_header = "Source Address", 1243 .se_cmp = sort__addr_from_cmp, 1244 .se_snprintf = hist_entry__addr_from_snprintf, 1245 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */ 1246 .se_width_idx = HISTC_ADDR_FROM, 1247 }; 1248 1249 struct sort_entry sort_addr_to = { 1250 .se_header = "Target Address", 1251 .se_cmp = sort__addr_to_cmp, 1252 .se_snprintf = hist_entry__addr_to_snprintf, 1253 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */ 1254 .se_width_idx = HISTC_ADDR_TO, 1255 }; 1256 1257 1258 static int64_t 1259 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 1260 { 1261 unsigned char mp, p; 1262 1263 if (!left->branch_info || !right->branch_info) 1264 return cmp_null(left->branch_info, right->branch_info); 1265 1266 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 1267 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 1268 return mp || p; 1269 } 1270 1271 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 1272 size_t size, unsigned int width){ 1273 static const char *out = "N/A"; 1274 1275 if (he->branch_info) { 1276 if (he->branch_info->flags.predicted) 1277 out = "N"; 1278 else if (he->branch_info->flags.mispred) 1279 out = "Y"; 1280 } 1281 1282 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 1283 } 1284 1285 static int64_t 1286 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 1287 { 1288 if (!left->branch_info || !right->branch_info) 1289 return cmp_null(left->branch_info, right->branch_info); 1290 1291 return left->branch_info->flags.cycles - 1292 right->branch_info->flags.cycles; 1293 } 1294 1295 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 1296 size_t size, unsigned int width) 1297 { 1298 if (!he->branch_info) 1299 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1300 if (he->branch_info->flags.cycles == 0) 1301 return repsep_snprintf(bf, size, "%-*s", width, "-"); 1302 return repsep_snprintf(bf, size, "%-*hd", width, 1303 he->branch_info->flags.cycles); 1304 } 1305 1306 struct sort_entry sort_cycles = { 1307 .se_header = "Basic Block Cycles", 1308 .se_cmp = sort__cycles_cmp, 1309 .se_snprintf = hist_entry__cycles_snprintf, 1310 .se_width_idx = HISTC_CYCLES, 1311 }; 1312 1313 /* --sort daddr_sym */ 1314 int64_t 1315 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1316 { 1317 uint64_t l = 0, r = 0; 1318 1319 if (left->mem_info) 1320 l = left->mem_info->daddr.addr; 1321 if (right->mem_info) 1322 r = right->mem_info->daddr.addr; 1323 1324 return (int64_t)(r - l); 1325 } 1326 1327 static int 
hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1328 size_t size, unsigned int width) 1329 { 1330 uint64_t addr = 0; 1331 struct map_symbol *ms = NULL; 1332 1333 if (he->mem_info) { 1334 addr = he->mem_info->daddr.addr; 1335 ms = &he->mem_info->daddr.ms; 1336 } 1337 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1338 } 1339 1340 int64_t 1341 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1342 { 1343 uint64_t l = 0, r = 0; 1344 1345 if (left->mem_info) 1346 l = left->mem_info->iaddr.addr; 1347 if (right->mem_info) 1348 r = right->mem_info->iaddr.addr; 1349 1350 return (int64_t)(r - l); 1351 } 1352 1353 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1354 size_t size, unsigned int width) 1355 { 1356 uint64_t addr = 0; 1357 struct map_symbol *ms = NULL; 1358 1359 if (he->mem_info) { 1360 addr = he->mem_info->iaddr.addr; 1361 ms = &he->mem_info->iaddr.ms; 1362 } 1363 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1364 } 1365 1366 static int64_t 1367 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1368 { 1369 struct map *map_l = NULL; 1370 struct map *map_r = NULL; 1371 1372 if (left->mem_info) 1373 map_l = left->mem_info->daddr.ms.map; 1374 if (right->mem_info) 1375 map_r = right->mem_info->daddr.ms.map; 1376 1377 return _sort__dso_cmp(map_l, map_r); 1378 } 1379 1380 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1381 size_t size, unsigned int width) 1382 { 1383 struct map *map = NULL; 1384 1385 if (he->mem_info) 1386 map = he->mem_info->daddr.ms.map; 1387 1388 return _hist_entry__dso_snprintf(map, bf, size, width); 1389 } 1390 1391 static int64_t 1392 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1393 { 1394 union perf_mem_data_src data_src_l; 1395 union perf_mem_data_src data_src_r; 1396 1397 if (left->mem_info) 1398 data_src_l = left->mem_info->data_src; 1399 else 1400 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1401 1402 if (right->mem_info) 1403 data_src_r = right->mem_info->data_src; 1404 else 1405 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1406 1407 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1408 } 1409 1410 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1411 size_t size, unsigned int width) 1412 { 1413 char out[10]; 1414 1415 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1416 return repsep_snprintf(bf, size, "%.*s", width, out); 1417 } 1418 1419 static int64_t 1420 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1421 { 1422 union perf_mem_data_src data_src_l; 1423 union perf_mem_data_src data_src_r; 1424 1425 if (left->mem_info) 1426 data_src_l = left->mem_info->data_src; 1427 else 1428 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1429 1430 if (right->mem_info) 1431 data_src_r = right->mem_info->data_src; 1432 else 1433 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1434 1435 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1436 } 1437 1438 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1439 size_t size, unsigned int width) 1440 { 1441 char out[64]; 1442 1443 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1444 return repsep_snprintf(bf, size, "%-*s", width, out); 1445 } 1446 1447 static int64_t 1448 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1449 { 1450 union perf_mem_data_src data_src_l; 1451 union perf_mem_data_src data_src_r; 1452 1453 if (left->mem_info) 1454 data_src_l = 
left->mem_info->data_src; 1455 else 1456 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1457 1458 if (right->mem_info) 1459 data_src_r = right->mem_info->data_src; 1460 else 1461 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1462 1463 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1464 } 1465 1466 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1467 size_t size, unsigned int width) 1468 { 1469 char out[64]; 1470 1471 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1472 return repsep_snprintf(bf, size, "%-*s", width, out); 1473 } 1474 1475 static int64_t 1476 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1477 { 1478 union perf_mem_data_src data_src_l; 1479 union perf_mem_data_src data_src_r; 1480 1481 if (left->mem_info) 1482 data_src_l = left->mem_info->data_src; 1483 else 1484 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1485 1486 if (right->mem_info) 1487 data_src_r = right->mem_info->data_src; 1488 else 1489 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1490 1491 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1492 } 1493 1494 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1495 size_t size, unsigned int width) 1496 { 1497 char out[64]; 1498 1499 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1500 return repsep_snprintf(bf, size, "%-*s", width, out); 1501 } 1502 1503 int64_t 1504 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1505 { 1506 u64 l, r; 1507 struct map *l_map, *r_map; 1508 struct dso *l_dso, *r_dso; 1509 int rc; 1510 1511 if (!left->mem_info) return -1; 1512 if (!right->mem_info) return 1; 1513 1514 /* group event types together */ 1515 if (left->cpumode > right->cpumode) return -1; 1516 if (left->cpumode < right->cpumode) return 1; 1517 1518 l_map = left->mem_info->daddr.ms.map; 1519 r_map = right->mem_info->daddr.ms.map; 1520 1521 /* if both are NULL, jump to sort on al_addr instead */ 1522 if (!l_map && !r_map) 1523 goto addr; 1524 1525 if (!l_map) return -1; 1526 if (!r_map) return 1; 1527 1528 l_dso = map__dso(l_map); 1529 r_dso = map__dso(r_map); 1530 rc = dso__cmp_id(l_dso, r_dso); 1531 if (rc) 1532 return rc; 1533 /* 1534 * Addresses with no major/minor numbers are assumed to be 1535 * anonymous in userspace. Sort those on pid then address. 1536 * 1537 * The kernel and non-zero major/minor mapped areas are 1538 * assumed to be unity mapped. Sort those on address. 1539 */ 1540 1541 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1542 (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min && 1543 !l_dso->id.ino && !l_dso->id.ino_generation) { 1544 /* userspace anonymous */ 1545 1546 if (left->thread->pid_ > right->thread->pid_) return -1; 1547 if (left->thread->pid_ < right->thread->pid_) return 1; 1548 } 1549 1550 addr: 1551 /* al_addr does all the right addr - start + offset calculations */ 1552 l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl); 1553 r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl); 1554 1555 if (l > r) return -1; 1556 if (l < r) return 1; 1557 1558 return 0; 1559 } 1560 1561 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1562 size_t size, unsigned int width) 1563 { 1564 1565 uint64_t addr = 0; 1566 struct map_symbol *ms = NULL; 1567 char level = he->level; 1568 1569 if (he->mem_info) { 1570 struct map *map = he->mem_info->daddr.ms.map; 1571 struct dso *dso = map ? 
map__dso(map) : NULL; 1572 1573 addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl); 1574 ms = &he->mem_info->daddr.ms; 1575 1576 /* print [s] for shared data mmaps */ 1577 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1578 map && !(map__prot(map) & PROT_EXEC) && 1579 (map__flags(map) & MAP_SHARED) && 1580 (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation)) 1581 level = 's'; 1582 else if (!map) 1583 level = 'X'; 1584 } 1585 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width); 1586 } 1587 1588 struct sort_entry sort_mispredict = { 1589 .se_header = "Branch Mispredicted", 1590 .se_cmp = sort__mispredict_cmp, 1591 .se_snprintf = hist_entry__mispredict_snprintf, 1592 .se_width_idx = HISTC_MISPREDICT, 1593 }; 1594 1595 static int64_t 1596 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right) 1597 { 1598 return left->weight - right->weight; 1599 } 1600 1601 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1602 size_t size, unsigned int width) 1603 { 1604 return repsep_snprintf(bf, size, "%-*llu", width, he->weight); 1605 } 1606 1607 struct sort_entry sort_local_weight = { 1608 .se_header = "Local Weight", 1609 .se_cmp = sort__weight_cmp, 1610 .se_snprintf = hist_entry__local_weight_snprintf, 1611 .se_width_idx = HISTC_LOCAL_WEIGHT, 1612 }; 1613 1614 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1615 size_t size, unsigned int width) 1616 { 1617 return repsep_snprintf(bf, size, "%-*llu", width, 1618 he->weight * he->stat.nr_events); 1619 } 1620 1621 struct sort_entry sort_global_weight = { 1622 .se_header = "Weight", 1623 .se_cmp = sort__weight_cmp, 1624 .se_snprintf = hist_entry__global_weight_snprintf, 1625 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1626 }; 1627 1628 static int64_t 1629 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) 1630 { 1631 return left->ins_lat - right->ins_lat; 1632 } 1633 1634 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, 1635 size_t size, unsigned int width) 1636 { 1637 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat); 1638 } 1639 1640 struct sort_entry sort_local_ins_lat = { 1641 .se_header = "Local INSTR Latency", 1642 .se_cmp = sort__ins_lat_cmp, 1643 .se_snprintf = hist_entry__local_ins_lat_snprintf, 1644 .se_width_idx = HISTC_LOCAL_INS_LAT, 1645 }; 1646 1647 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, 1648 size_t size, unsigned int width) 1649 { 1650 return repsep_snprintf(bf, size, "%-*u", width, 1651 he->ins_lat * he->stat.nr_events); 1652 } 1653 1654 struct sort_entry sort_global_ins_lat = { 1655 .se_header = "INSTR Latency", 1656 .se_cmp = sort__ins_lat_cmp, 1657 .se_snprintf = hist_entry__global_ins_lat_snprintf, 1658 .se_width_idx = HISTC_GLOBAL_INS_LAT, 1659 }; 1660 1661 static int64_t 1662 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right) 1663 { 1664 return left->p_stage_cyc - right->p_stage_cyc; 1665 } 1666 1667 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1668 size_t size, unsigned int width) 1669 { 1670 return repsep_snprintf(bf, size, "%-*u", width, 1671 he->p_stage_cyc * he->stat.nr_events); 1672 } 1673 1674 1675 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf, 1676 size_t size, unsigned int width) 1677 { 1678 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc); 1679 } 1680 1681 struct sort_entry sort_local_p_stage_cyc = { 1682 .se_header 
= "Local Pipeline Stage Cycle", 1683 .se_cmp = sort__p_stage_cyc_cmp, 1684 .se_snprintf = hist_entry__p_stage_cyc_snprintf, 1685 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC, 1686 }; 1687 1688 struct sort_entry sort_global_p_stage_cyc = { 1689 .se_header = "Pipeline Stage Cycle", 1690 .se_cmp = sort__p_stage_cyc_cmp, 1691 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf, 1692 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC, 1693 }; 1694 1695 struct sort_entry sort_mem_daddr_sym = { 1696 .se_header = "Data Symbol", 1697 .se_cmp = sort__daddr_cmp, 1698 .se_snprintf = hist_entry__daddr_snprintf, 1699 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1700 }; 1701 1702 struct sort_entry sort_mem_iaddr_sym = { 1703 .se_header = "Code Symbol", 1704 .se_cmp = sort__iaddr_cmp, 1705 .se_snprintf = hist_entry__iaddr_snprintf, 1706 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1707 }; 1708 1709 struct sort_entry sort_mem_daddr_dso = { 1710 .se_header = "Data Object", 1711 .se_cmp = sort__dso_daddr_cmp, 1712 .se_snprintf = hist_entry__dso_daddr_snprintf, 1713 .se_width_idx = HISTC_MEM_DADDR_DSO, 1714 }; 1715 1716 struct sort_entry sort_mem_locked = { 1717 .se_header = "Locked", 1718 .se_cmp = sort__locked_cmp, 1719 .se_snprintf = hist_entry__locked_snprintf, 1720 .se_width_idx = HISTC_MEM_LOCKED, 1721 }; 1722 1723 struct sort_entry sort_mem_tlb = { 1724 .se_header = "TLB access", 1725 .se_cmp = sort__tlb_cmp, 1726 .se_snprintf = hist_entry__tlb_snprintf, 1727 .se_width_idx = HISTC_MEM_TLB, 1728 }; 1729 1730 struct sort_entry sort_mem_lvl = { 1731 .se_header = "Memory access", 1732 .se_cmp = sort__lvl_cmp, 1733 .se_snprintf = hist_entry__lvl_snprintf, 1734 .se_width_idx = HISTC_MEM_LVL, 1735 }; 1736 1737 struct sort_entry sort_mem_snoop = { 1738 .se_header = "Snoop", 1739 .se_cmp = sort__snoop_cmp, 1740 .se_snprintf = hist_entry__snoop_snprintf, 1741 .se_width_idx = HISTC_MEM_SNOOP, 1742 }; 1743 1744 struct sort_entry sort_mem_dcacheline = { 1745 .se_header = "Data Cacheline", 1746 .se_cmp = sort__dcacheline_cmp, 1747 .se_snprintf = hist_entry__dcacheline_snprintf, 1748 .se_width_idx = HISTC_MEM_DCACHELINE, 1749 }; 1750 1751 static int64_t 1752 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1753 { 1754 union perf_mem_data_src data_src_l; 1755 union perf_mem_data_src data_src_r; 1756 1757 if (left->mem_info) 1758 data_src_l = left->mem_info->data_src; 1759 else 1760 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1761 1762 if (right->mem_info) 1763 data_src_r = right->mem_info->data_src; 1764 else 1765 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1766 1767 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1768 } 1769 1770 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1771 size_t size, unsigned int width) 1772 { 1773 char out[16]; 1774 1775 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1776 return repsep_snprintf(bf, size, "%.*s", width, out); 1777 } 1778 1779 struct sort_entry sort_mem_blocked = { 1780 .se_header = "Blocked", 1781 .se_cmp = sort__blocked_cmp, 1782 .se_snprintf = hist_entry__blocked_snprintf, 1783 .se_width_idx = HISTC_MEM_BLOCKED, 1784 }; 1785 1786 static int64_t 1787 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1788 { 1789 uint64_t l = 0, r = 0; 1790 1791 if (left->mem_info) 1792 l = left->mem_info->daddr.phys_addr; 1793 if (right->mem_info) 1794 r = right->mem_info->daddr.phys_addr; 1795 1796 return (int64_t)(r - l); 1797 } 1798 1799 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1800 size_t size, 
unsigned int width) 1801 { 1802 uint64_t addr = 0; 1803 size_t ret = 0; 1804 size_t len = BITS_PER_LONG / 4; 1805 1806 addr = he->mem_info->daddr.phys_addr; 1807 1808 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1809 1810 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1811 1812 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1813 1814 if (ret > width) 1815 bf[width] = '\0'; 1816 1817 return width; 1818 } 1819 1820 struct sort_entry sort_mem_phys_daddr = { 1821 .se_header = "Data Physical Address", 1822 .se_cmp = sort__phys_daddr_cmp, 1823 .se_snprintf = hist_entry__phys_daddr_snprintf, 1824 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1825 }; 1826 1827 static int64_t 1828 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1829 { 1830 uint64_t l = 0, r = 0; 1831 1832 if (left->mem_info) 1833 l = left->mem_info->daddr.data_page_size; 1834 if (right->mem_info) 1835 r = right->mem_info->daddr.data_page_size; 1836 1837 return (int64_t)(r - l); 1838 } 1839 1840 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, 1841 size_t size, unsigned int width) 1842 { 1843 char str[PAGE_SIZE_NAME_LEN]; 1844 1845 return repsep_snprintf(bf, size, "%-*s", width, 1846 get_page_size_name(he->mem_info->daddr.data_page_size, str)); 1847 } 1848 1849 struct sort_entry sort_mem_data_page_size = { 1850 .se_header = "Data Page Size", 1851 .se_cmp = sort__data_page_size_cmp, 1852 .se_snprintf = hist_entry__data_page_size_snprintf, 1853 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE, 1854 }; 1855 1856 static int64_t 1857 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1858 { 1859 uint64_t l = left->code_page_size; 1860 uint64_t r = right->code_page_size; 1861 1862 return (int64_t)(r - l); 1863 } 1864 1865 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf, 1866 size_t size, unsigned int width) 1867 { 1868 char str[PAGE_SIZE_NAME_LEN]; 1869 1870 return repsep_snprintf(bf, size, "%-*s", width, 1871 get_page_size_name(he->code_page_size, str)); 1872 } 1873 1874 struct sort_entry sort_code_page_size = { 1875 .se_header = "Code Page Size", 1876 .se_cmp = sort__code_page_size_cmp, 1877 .se_snprintf = hist_entry__code_page_size_snprintf, 1878 .se_width_idx = HISTC_CODE_PAGE_SIZE, 1879 }; 1880 1881 static int64_t 1882 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1883 { 1884 if (!left->branch_info || !right->branch_info) 1885 return cmp_null(left->branch_info, right->branch_info); 1886 1887 return left->branch_info->flags.abort != 1888 right->branch_info->flags.abort; 1889 } 1890 1891 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1892 size_t size, unsigned int width) 1893 { 1894 static const char *out = "N/A"; 1895 1896 if (he->branch_info) { 1897 if (he->branch_info->flags.abort) 1898 out = "A"; 1899 else 1900 out = "."; 1901 } 1902 1903 return repsep_snprintf(bf, size, "%-*s", width, out); 1904 } 1905 1906 struct sort_entry sort_abort = { 1907 .se_header = "Transaction abort", 1908 .se_cmp = sort__abort_cmp, 1909 .se_snprintf = hist_entry__abort_snprintf, 1910 .se_width_idx = HISTC_ABORT, 1911 }; 1912 1913 static int64_t 1914 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1915 { 1916 if (!left->branch_info || !right->branch_info) 1917 return cmp_null(left->branch_info, right->branch_info); 1918 1919 return left->branch_info->flags.in_tx != 1920 right->branch_info->flags.in_tx; 1921 } 1922 1923 static 
int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1924 size_t size, unsigned int width) 1925 { 1926 static const char *out = "N/A"; 1927 1928 if (he->branch_info) { 1929 if (he->branch_info->flags.in_tx) 1930 out = "T"; 1931 else 1932 out = "."; 1933 } 1934 1935 return repsep_snprintf(bf, size, "%-*s", width, out); 1936 } 1937 1938 struct sort_entry sort_in_tx = { 1939 .se_header = "Branch in transaction", 1940 .se_cmp = sort__in_tx_cmp, 1941 .se_snprintf = hist_entry__in_tx_snprintf, 1942 .se_width_idx = HISTC_IN_TX, 1943 }; 1944 1945 static int64_t 1946 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1947 { 1948 return left->transaction - right->transaction; 1949 } 1950 1951 static inline char *add_str(char *p, const char *str) 1952 { 1953 strcpy(p, str); 1954 return p + strlen(str); 1955 } 1956 1957 static struct txbit { 1958 unsigned flag; 1959 const char *name; 1960 int skip_for_len; 1961 } txbits[] = { 1962 { PERF_TXN_ELISION, "EL ", 0 }, 1963 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1964 { PERF_TXN_SYNC, "SYNC ", 1 }, 1965 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1966 { PERF_TXN_RETRY, "RETRY ", 0 }, 1967 { PERF_TXN_CONFLICT, "CON ", 0 }, 1968 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1969 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1970 { 0, NULL, 0 } 1971 }; 1972 1973 int hist_entry__transaction_len(void) 1974 { 1975 int i; 1976 int len = 0; 1977 1978 for (i = 0; txbits[i].name; i++) { 1979 if (!txbits[i].skip_for_len) 1980 len += strlen(txbits[i].name); 1981 } 1982 len += 4; /* :XX<space> */ 1983 return len; 1984 } 1985 1986 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1987 size_t size, unsigned int width) 1988 { 1989 u64 t = he->transaction; 1990 char buf[128]; 1991 char *p = buf; 1992 int i; 1993 1994 buf[0] = 0; 1995 for (i = 0; txbits[i].name; i++) 1996 if (txbits[i].flag & t) 1997 p = add_str(p, txbits[i].name); 1998 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1999 p = add_str(p, "NEITHER "); 2000 if (t & PERF_TXN_ABORT_MASK) { 2001 sprintf(p, ":%" PRIx64, 2002 (t & PERF_TXN_ABORT_MASK) >> 2003 PERF_TXN_ABORT_SHIFT); 2004 p += strlen(p); 2005 } 2006 2007 return repsep_snprintf(bf, size, "%-*s", width, buf); 2008 } 2009 2010 struct sort_entry sort_transaction = { 2011 .se_header = "Transaction ", 2012 .se_cmp = sort__transaction_cmp, 2013 .se_snprintf = hist_entry__transaction_snprintf, 2014 .se_width_idx = HISTC_TRANSACTION, 2015 }; 2016 2017 /* --sort symbol_size */ 2018 2019 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 2020 { 2021 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 2022 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 2023 2024 return size_l < size_r ? -1 : 2025 size_l == size_r ? 
					0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};

/* --sort dso_size */

static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
{
	int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
	int64_t size_r = map_r != NULL ? map__size(map_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map__dso(map))
		return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};

/* --sort addr */

static int64_t
sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 left_ip = left->ip;
	u64 right_ip = right->ip;
	struct map *left_map = left->ms.map;
	struct map *right_map = right->ms.map;

	if (left_map)
		left_ip = map__unmap_ip(left_map, left_ip);
	if (right_map)
		right_ip = map__unmap_ip(right_map, right_ip);

	return _sort__addr_cmp(left_ip, right_ip);
}

static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	u64 ip = he->ip;
	struct map *map = he->ms.map;

	if (map)
		ip = map__unmap_ip(map, ip);

	return repsep_snprintf(bf, size, "%-#*llx", width, ip);
}

struct sort_entry sort_addr = {
	.se_header	= "Address",
	.se_cmp		= sort__addr_cmp,
	.se_snprintf	= hist_entry__addr_snprintf,
	.se_width_idx	= HISTC_ADDR,
};

struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};

int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
{
	return 0;
}

const char * __weak arch_perf_header_entry(const char *se_header)
{
	return se_header;
}

static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
{
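	/* Let the architecture provide a more specific column header for this key. */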
	sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
}

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
#ifdef HAVE_LIBTRACEEVENT
	DIM(SORT_TRACE, "trace", sort_trace),
#endif
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
	DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
	DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
	DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
	DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
	DIM(SORT_ADDR, "addr", sort_addr),
	DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
	DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
	DIM(SORT_SIMD, "simd", sort_simd)
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
	DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
	DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
	DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
	DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
};

#undef DIM

struct hpp_dimension {
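	/* column name as accepted by --sort/--fields, e.g. "overhead" */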
const char *name; 2231 struct perf_hpp_fmt *fmt; 2232 int taken; 2233 }; 2234 2235 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 2236 2237 static struct hpp_dimension hpp_sort_dimensions[] = { 2238 DIM(PERF_HPP__OVERHEAD, "overhead"), 2239 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 2240 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 2241 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 2242 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 2243 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 2244 DIM(PERF_HPP__SAMPLES, "sample"), 2245 DIM(PERF_HPP__PERIOD, "period"), 2246 }; 2247 2248 #undef DIM 2249 2250 struct hpp_sort_entry { 2251 struct perf_hpp_fmt hpp; 2252 struct sort_entry *se; 2253 }; 2254 2255 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 2256 { 2257 struct hpp_sort_entry *hse; 2258 2259 if (!perf_hpp__is_sort_entry(fmt)) 2260 return; 2261 2262 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2263 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 2264 } 2265 2266 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2267 struct hists *hists, int line __maybe_unused, 2268 int *span __maybe_unused) 2269 { 2270 struct hpp_sort_entry *hse; 2271 size_t len = fmt->user_len; 2272 2273 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2274 2275 if (!len) 2276 len = hists__col_len(hists, hse->se->se_width_idx); 2277 2278 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 2279 } 2280 2281 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 2282 struct perf_hpp *hpp __maybe_unused, 2283 struct hists *hists) 2284 { 2285 struct hpp_sort_entry *hse; 2286 size_t len = fmt->user_len; 2287 2288 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2289 2290 if (!len) 2291 len = hists__col_len(hists, hse->se->se_width_idx); 2292 2293 return len; 2294 } 2295 2296 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2297 struct hist_entry *he) 2298 { 2299 struct hpp_sort_entry *hse; 2300 size_t len = fmt->user_len; 2301 2302 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2303 2304 if (!len) 2305 len = hists__col_len(he->hists, hse->se->se_width_idx); 2306 2307 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 2308 } 2309 2310 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 2311 struct hist_entry *a, struct hist_entry *b) 2312 { 2313 struct hpp_sort_entry *hse; 2314 2315 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2316 return hse->se->se_cmp(a, b); 2317 } 2318 2319 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 2320 struct hist_entry *a, struct hist_entry *b) 2321 { 2322 struct hpp_sort_entry *hse; 2323 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 2324 2325 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2326 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 2327 return collapse_fn(a, b); 2328 } 2329 2330 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 2331 struct hist_entry *a, struct hist_entry *b) 2332 { 2333 struct hpp_sort_entry *hse; 2334 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 2335 2336 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2337 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 2338 return sort_fn(a, b); 2339 } 2340 2341 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 2342 { 2343 return format->header == __sort__hpp_header; 2344 } 2345 2346 #define MK_SORT_ENTRY_CHK(key) \ 2347 bool perf_hpp__is_ ## key ## 
_entry(struct perf_hpp_fmt *fmt) \ 2348 { \ 2349 struct hpp_sort_entry *hse; \ 2350 \ 2351 if (!perf_hpp__is_sort_entry(fmt)) \ 2352 return false; \ 2353 \ 2354 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2355 return hse->se == &sort_ ## key ; \ 2356 } 2357 2358 #ifdef HAVE_LIBTRACEEVENT 2359 MK_SORT_ENTRY_CHK(trace) 2360 #else 2361 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2362 { 2363 return false; 2364 } 2365 #endif 2366 MK_SORT_ENTRY_CHK(srcline) 2367 MK_SORT_ENTRY_CHK(srcfile) 2368 MK_SORT_ENTRY_CHK(thread) 2369 MK_SORT_ENTRY_CHK(comm) 2370 MK_SORT_ENTRY_CHK(dso) 2371 MK_SORT_ENTRY_CHK(sym) 2372 2373 2374 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2375 { 2376 struct hpp_sort_entry *hse_a; 2377 struct hpp_sort_entry *hse_b; 2378 2379 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2380 return false; 2381 2382 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2383 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2384 2385 return hse_a->se == hse_b->se; 2386 } 2387 2388 static void hse_free(struct perf_hpp_fmt *fmt) 2389 { 2390 struct hpp_sort_entry *hse; 2391 2392 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2393 free(hse); 2394 } 2395 2396 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2397 { 2398 struct hpp_sort_entry *hse; 2399 2400 if (!perf_hpp__is_sort_entry(fmt)) 2401 return; 2402 2403 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2404 2405 if (hse->se->se_init) 2406 hse->se->se_init(he); 2407 } 2408 2409 static struct hpp_sort_entry * 2410 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2411 { 2412 struct hpp_sort_entry *hse; 2413 2414 hse = malloc(sizeof(*hse)); 2415 if (hse == NULL) { 2416 pr_err("Memory allocation failed\n"); 2417 return NULL; 2418 } 2419 2420 hse->se = sd->entry; 2421 hse->hpp.name = sd->entry->se_header; 2422 hse->hpp.header = __sort__hpp_header; 2423 hse->hpp.width = __sort__hpp_width; 2424 hse->hpp.entry = __sort__hpp_entry; 2425 hse->hpp.color = NULL; 2426 2427 hse->hpp.cmp = __sort__hpp_cmp; 2428 hse->hpp.collapse = __sort__hpp_collapse; 2429 hse->hpp.sort = __sort__hpp_sort; 2430 hse->hpp.equal = __sort__hpp_equal; 2431 hse->hpp.free = hse_free; 2432 hse->hpp.init = hse_init; 2433 2434 INIT_LIST_HEAD(&hse->hpp.list); 2435 INIT_LIST_HEAD(&hse->hpp.sort_list); 2436 hse->hpp.elide = false; 2437 hse->hpp.len = 0; 2438 hse->hpp.user_len = 0; 2439 hse->hpp.level = level; 2440 2441 return hse; 2442 } 2443 2444 static void hpp_free(struct perf_hpp_fmt *fmt) 2445 { 2446 free(fmt); 2447 } 2448 2449 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2450 int level) 2451 { 2452 struct perf_hpp_fmt *fmt; 2453 2454 fmt = memdup(hd->fmt, sizeof(*fmt)); 2455 if (fmt) { 2456 INIT_LIST_HEAD(&fmt->list); 2457 INIT_LIST_HEAD(&fmt->sort_list); 2458 fmt->free = hpp_free; 2459 fmt->level = level; 2460 } 2461 2462 return fmt; 2463 } 2464 2465 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2466 { 2467 struct perf_hpp_fmt *fmt; 2468 struct hpp_sort_entry *hse; 2469 int ret = -1; 2470 int r; 2471 2472 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2473 if (!perf_hpp__is_sort_entry(fmt)) 2474 continue; 2475 2476 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2477 if (hse->se->se_filter == NULL) 2478 continue; 2479 2480 /* 2481 * hist entry is filtered if any of sort key in the hpp list 2482 * is applied. But it should skip non-matched filter types. 
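		 * (an se_filter callback returns -1 for filter types it does not
		 * handle, so those are simply skipped here)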
2483 */ 2484 r = hse->se->se_filter(he, type, arg); 2485 if (r >= 0) { 2486 if (ret < 0) 2487 ret = 0; 2488 ret |= r; 2489 } 2490 } 2491 2492 return ret; 2493 } 2494 2495 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2496 struct perf_hpp_list *list, 2497 int level) 2498 { 2499 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2500 2501 if (hse == NULL) 2502 return -1; 2503 2504 perf_hpp_list__register_sort_field(list, &hse->hpp); 2505 return 0; 2506 } 2507 2508 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2509 struct perf_hpp_list *list) 2510 { 2511 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2512 2513 if (hse == NULL) 2514 return -1; 2515 2516 perf_hpp_list__column_register(list, &hse->hpp); 2517 return 0; 2518 } 2519 2520 #ifndef HAVE_LIBTRACEEVENT 2521 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused) 2522 { 2523 return false; 2524 } 2525 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused, 2526 struct hists *hists __maybe_unused) 2527 { 2528 return false; 2529 } 2530 #else 2531 struct hpp_dynamic_entry { 2532 struct perf_hpp_fmt hpp; 2533 struct evsel *evsel; 2534 struct tep_format_field *field; 2535 unsigned dynamic_len; 2536 bool raw_trace; 2537 }; 2538 2539 static int hde_width(struct hpp_dynamic_entry *hde) 2540 { 2541 if (!hde->hpp.len) { 2542 int len = hde->dynamic_len; 2543 int namelen = strlen(hde->field->name); 2544 int fieldlen = hde->field->size; 2545 2546 if (namelen > len) 2547 len = namelen; 2548 2549 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2550 /* length for print hex numbers */ 2551 fieldlen = hde->field->size * 2 + 2; 2552 } 2553 if (fieldlen > len) 2554 len = fieldlen; 2555 2556 hde->hpp.len = len; 2557 } 2558 return hde->hpp.len; 2559 } 2560 2561 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2562 struct hist_entry *he) 2563 { 2564 char *str, *pos; 2565 struct tep_format_field *field = hde->field; 2566 size_t namelen; 2567 bool last = false; 2568 2569 if (hde->raw_trace) 2570 return; 2571 2572 /* parse pretty print result and update max length */ 2573 if (!he->trace_output) 2574 he->trace_output = get_trace_output(he); 2575 2576 namelen = strlen(field->name); 2577 str = he->trace_output; 2578 2579 while (str) { 2580 pos = strchr(str, ' '); 2581 if (pos == NULL) { 2582 last = true; 2583 pos = str + strlen(str); 2584 } 2585 2586 if (!strncmp(str, field->name, namelen)) { 2587 size_t len; 2588 2589 str += namelen + 1; 2590 len = pos - str; 2591 2592 if (len > hde->dynamic_len) 2593 hde->dynamic_len = len; 2594 break; 2595 } 2596 2597 if (last) 2598 str = NULL; 2599 else 2600 str = pos + 1; 2601 } 2602 } 2603 2604 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2605 struct hists *hists __maybe_unused, 2606 int line __maybe_unused, 2607 int *span __maybe_unused) 2608 { 2609 struct hpp_dynamic_entry *hde; 2610 size_t len = fmt->user_len; 2611 2612 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2613 2614 if (!len) 2615 len = hde_width(hde); 2616 2617 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2618 } 2619 2620 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2621 struct perf_hpp *hpp __maybe_unused, 2622 struct hists *hists __maybe_unused) 2623 { 2624 struct hpp_dynamic_entry *hde; 2625 size_t len = fmt->user_len; 2626 2627 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2628 2629 if (!len) 2630 len = hde_width(hde); 2631 2632 
return len; 2633 } 2634 2635 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2636 { 2637 struct hpp_dynamic_entry *hde; 2638 2639 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2640 2641 return hists_to_evsel(hists) == hde->evsel; 2642 } 2643 2644 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2645 struct hist_entry *he) 2646 { 2647 struct hpp_dynamic_entry *hde; 2648 size_t len = fmt->user_len; 2649 char *str, *pos; 2650 struct tep_format_field *field; 2651 size_t namelen; 2652 bool last = false; 2653 int ret; 2654 2655 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2656 2657 if (!len) 2658 len = hde_width(hde); 2659 2660 if (hde->raw_trace) 2661 goto raw_field; 2662 2663 if (!he->trace_output) 2664 he->trace_output = get_trace_output(he); 2665 2666 field = hde->field; 2667 namelen = strlen(field->name); 2668 str = he->trace_output; 2669 2670 while (str) { 2671 pos = strchr(str, ' '); 2672 if (pos == NULL) { 2673 last = true; 2674 pos = str + strlen(str); 2675 } 2676 2677 if (!strncmp(str, field->name, namelen)) { 2678 str += namelen + 1; 2679 str = strndup(str, pos - str); 2680 2681 if (str == NULL) 2682 return scnprintf(hpp->buf, hpp->size, 2683 "%*.*s", len, len, "ERROR"); 2684 break; 2685 } 2686 2687 if (last) 2688 str = NULL; 2689 else 2690 str = pos + 1; 2691 } 2692 2693 if (str == NULL) { 2694 struct trace_seq seq; 2695 raw_field: 2696 trace_seq_init(&seq); 2697 tep_print_field(&seq, he->raw_data, hde->field); 2698 str = seq.buffer; 2699 } 2700 2701 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2702 free(str); 2703 return ret; 2704 } 2705 2706 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2707 struct hist_entry *a, struct hist_entry *b) 2708 { 2709 struct hpp_dynamic_entry *hde; 2710 struct tep_format_field *field; 2711 unsigned offset, size; 2712 2713 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2714 2715 field = hde->field; 2716 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2717 unsigned long long dyn; 2718 2719 tep_read_number_field(field, a->raw_data, &dyn); 2720 offset = dyn & 0xffff; 2721 size = (dyn >> 16) & 0xffff; 2722 if (tep_field_is_relative(field->flags)) 2723 offset += field->offset + field->size; 2724 /* record max width for output */ 2725 if (size > hde->dynamic_len) 2726 hde->dynamic_len = size; 2727 } else { 2728 offset = field->offset; 2729 size = field->size; 2730 } 2731 2732 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2733 } 2734 2735 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2736 { 2737 return fmt->cmp == __sort__hde_cmp; 2738 } 2739 2740 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2741 { 2742 struct hpp_dynamic_entry *hde_a; 2743 struct hpp_dynamic_entry *hde_b; 2744 2745 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2746 return false; 2747 2748 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2749 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2750 2751 return hde_a->field == hde_b->field; 2752 } 2753 2754 static void hde_free(struct perf_hpp_fmt *fmt) 2755 { 2756 struct hpp_dynamic_entry *hde; 2757 2758 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2759 free(hde); 2760 } 2761 2762 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he) 2763 { 2764 struct hpp_dynamic_entry *hde; 2765 2766 if (!perf_hpp__is_dynamic_entry(fmt)) 2767 return; 2768 2769 hde = container_of(fmt, struct hpp_dynamic_entry, 
hpp); 2770 update_dynamic_len(hde, he); 2771 } 2772 2773 static struct hpp_dynamic_entry * 2774 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2775 int level) 2776 { 2777 struct hpp_dynamic_entry *hde; 2778 2779 hde = malloc(sizeof(*hde)); 2780 if (hde == NULL) { 2781 pr_debug("Memory allocation failed\n"); 2782 return NULL; 2783 } 2784 2785 hde->evsel = evsel; 2786 hde->field = field; 2787 hde->dynamic_len = 0; 2788 2789 hde->hpp.name = field->name; 2790 hde->hpp.header = __sort__hde_header; 2791 hde->hpp.width = __sort__hde_width; 2792 hde->hpp.entry = __sort__hde_entry; 2793 hde->hpp.color = NULL; 2794 2795 hde->hpp.init = __sort__hde_init; 2796 hde->hpp.cmp = __sort__hde_cmp; 2797 hde->hpp.collapse = __sort__hde_cmp; 2798 hde->hpp.sort = __sort__hde_cmp; 2799 hde->hpp.equal = __sort__hde_equal; 2800 hde->hpp.free = hde_free; 2801 2802 INIT_LIST_HEAD(&hde->hpp.list); 2803 INIT_LIST_HEAD(&hde->hpp.sort_list); 2804 hde->hpp.elide = false; 2805 hde->hpp.len = 0; 2806 hde->hpp.user_len = 0; 2807 hde->hpp.level = level; 2808 2809 return hde; 2810 } 2811 #endif /* HAVE_LIBTRACEEVENT */ 2812 2813 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2814 { 2815 struct perf_hpp_fmt *new_fmt = NULL; 2816 2817 if (perf_hpp__is_sort_entry(fmt)) { 2818 struct hpp_sort_entry *hse, *new_hse; 2819 2820 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2821 new_hse = memdup(hse, sizeof(*hse)); 2822 if (new_hse) 2823 new_fmt = &new_hse->hpp; 2824 #ifdef HAVE_LIBTRACEEVENT 2825 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2826 struct hpp_dynamic_entry *hde, *new_hde; 2827 2828 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2829 new_hde = memdup(hde, sizeof(*hde)); 2830 if (new_hde) 2831 new_fmt = &new_hde->hpp; 2832 #endif 2833 } else { 2834 new_fmt = memdup(fmt, sizeof(*fmt)); 2835 } 2836 2837 INIT_LIST_HEAD(&new_fmt->list); 2838 INIT_LIST_HEAD(&new_fmt->sort_list); 2839 2840 return new_fmt; 2841 } 2842 2843 static int parse_field_name(char *str, char **event, char **field, char **opt) 2844 { 2845 char *event_name, *field_name, *opt_name; 2846 2847 event_name = str; 2848 field_name = strchr(str, '.'); 2849 2850 if (field_name) { 2851 *field_name++ = '\0'; 2852 } else { 2853 event_name = NULL; 2854 field_name = str; 2855 } 2856 2857 opt_name = strchr(field_name, '/'); 2858 if (opt_name) 2859 *opt_name++ = '\0'; 2860 2861 *event = event_name; 2862 *field = field_name; 2863 *opt = opt_name; 2864 2865 return 0; 2866 } 2867 2868 /* find match evsel using a given event name. The event name can be: 2869 * 1. '%' + event index (e.g. '%1' for first event) 2870 * 2. full event name (e.g. sched:sched_switch) 2871 * 3. 
partial event name (should not contain ':') 2872 */ 2873 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 2874 { 2875 struct evsel *evsel = NULL; 2876 struct evsel *pos; 2877 bool full_name; 2878 2879 /* case 1 */ 2880 if (event_name[0] == '%') { 2881 int nr = strtol(event_name+1, NULL, 0); 2882 2883 if (nr > evlist->core.nr_entries) 2884 return NULL; 2885 2886 evsel = evlist__first(evlist); 2887 while (--nr > 0) 2888 evsel = evsel__next(evsel); 2889 2890 return evsel; 2891 } 2892 2893 full_name = !!strchr(event_name, ':'); 2894 evlist__for_each_entry(evlist, pos) { 2895 /* case 2 */ 2896 if (full_name && evsel__name_is(pos, event_name)) 2897 return pos; 2898 /* case 3 */ 2899 if (!full_name && strstr(pos->name, event_name)) { 2900 if (evsel) { 2901 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2902 event_name, evsel->name, pos->name); 2903 return NULL; 2904 } 2905 evsel = pos; 2906 } 2907 } 2908 2909 return evsel; 2910 } 2911 2912 #ifdef HAVE_LIBTRACEEVENT 2913 static int __dynamic_dimension__add(struct evsel *evsel, 2914 struct tep_format_field *field, 2915 bool raw_trace, int level) 2916 { 2917 struct hpp_dynamic_entry *hde; 2918 2919 hde = __alloc_dynamic_entry(evsel, field, level); 2920 if (hde == NULL) 2921 return -ENOMEM; 2922 2923 hde->raw_trace = raw_trace; 2924 2925 perf_hpp__register_sort_field(&hde->hpp); 2926 return 0; 2927 } 2928 2929 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 2930 { 2931 int ret; 2932 struct tep_format_field *field; 2933 2934 field = evsel->tp_format->format.fields; 2935 while (field) { 2936 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2937 if (ret < 0) 2938 return ret; 2939 2940 field = field->next; 2941 } 2942 return 0; 2943 } 2944 2945 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 2946 int level) 2947 { 2948 int ret; 2949 struct evsel *evsel; 2950 2951 evlist__for_each_entry(evlist, evsel) { 2952 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2953 continue; 2954 2955 ret = add_evsel_fields(evsel, raw_trace, level); 2956 if (ret < 0) 2957 return ret; 2958 } 2959 return 0; 2960 } 2961 2962 static int add_all_matching_fields(struct evlist *evlist, 2963 char *field_name, bool raw_trace, int level) 2964 { 2965 int ret = -ESRCH; 2966 struct evsel *evsel; 2967 struct tep_format_field *field; 2968 2969 evlist__for_each_entry(evlist, evsel) { 2970 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2971 continue; 2972 2973 field = tep_find_any_field(evsel->tp_format, field_name); 2974 if (field == NULL) 2975 continue; 2976 2977 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2978 if (ret < 0) 2979 break; 2980 } 2981 return ret; 2982 } 2983 #endif /* HAVE_LIBTRACEEVENT */ 2984 2985 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 2986 int level) 2987 { 2988 char *str, *event_name, *field_name, *opt_name; 2989 struct evsel *evsel; 2990 bool raw_trace = symbol_conf.raw_trace; 2991 int ret = 0; 2992 2993 if (evlist == NULL) 2994 return -ENOENT; 2995 2996 str = strdup(tok); 2997 if (str == NULL) 2998 return -ENOMEM; 2999 3000 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 3001 ret = -EINVAL; 3002 goto out; 3003 } 3004 3005 if (opt_name) { 3006 if (strcmp(opt_name, "raw")) { 3007 pr_debug("unsupported field option %s\n", opt_name); 3008 ret = -EINVAL; 3009 goto out; 3010 } 3011 raw_trace = true; 3012 } 3013 3014 #ifdef HAVE_LIBTRACEEVENT 3015 if (!strcmp(field_name, "trace_fields")) { 3016 ret = 
add_all_dynamic_fields(evlist, raw_trace, level); 3017 goto out; 3018 } 3019 3020 if (event_name == NULL) { 3021 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 3022 goto out; 3023 } 3024 #else 3025 evlist__for_each_entry(evlist, evsel) { 3026 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) { 3027 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel)); 3028 ret = -ENOTSUP; 3029 } 3030 } 3031 3032 if (ret) { 3033 pr_err("\n"); 3034 goto out; 3035 } 3036 #endif 3037 3038 evsel = find_evsel(evlist, event_name); 3039 if (evsel == NULL) { 3040 pr_debug("Cannot find event: %s\n", event_name); 3041 ret = -ENOENT; 3042 goto out; 3043 } 3044 3045 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 3046 pr_debug("%s is not a tracepoint event\n", event_name); 3047 ret = -EINVAL; 3048 goto out; 3049 } 3050 3051 #ifdef HAVE_LIBTRACEEVENT 3052 if (!strcmp(field_name, "*")) { 3053 ret = add_evsel_fields(evsel, raw_trace, level); 3054 } else { 3055 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name); 3056 3057 if (field == NULL) { 3058 pr_debug("Cannot find event field for %s.%s\n", 3059 event_name, field_name); 3060 return -ENOENT; 3061 } 3062 3063 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 3064 } 3065 #else 3066 (void)level; 3067 (void)raw_trace; 3068 #endif /* HAVE_LIBTRACEEVENT */ 3069 3070 out: 3071 free(str); 3072 return ret; 3073 } 3074 3075 static int __sort_dimension__add(struct sort_dimension *sd, 3076 struct perf_hpp_list *list, 3077 int level) 3078 { 3079 if (sd->taken) 3080 return 0; 3081 3082 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 3083 return -1; 3084 3085 if (sd->entry->se_collapse) 3086 list->need_collapse = 1; 3087 3088 sd->taken = 1; 3089 3090 return 0; 3091 } 3092 3093 static int __hpp_dimension__add(struct hpp_dimension *hd, 3094 struct perf_hpp_list *list, 3095 int level) 3096 { 3097 struct perf_hpp_fmt *fmt; 3098 3099 if (hd->taken) 3100 return 0; 3101 3102 fmt = __hpp_dimension__alloc_hpp(hd, level); 3103 if (!fmt) 3104 return -1; 3105 3106 hd->taken = 1; 3107 perf_hpp_list__register_sort_field(list, fmt); 3108 return 0; 3109 } 3110 3111 static int __sort_dimension__add_output(struct perf_hpp_list *list, 3112 struct sort_dimension *sd) 3113 { 3114 if (sd->taken) 3115 return 0; 3116 3117 if (__sort_dimension__add_hpp_output(sd, list) < 0) 3118 return -1; 3119 3120 sd->taken = 1; 3121 return 0; 3122 } 3123 3124 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 3125 struct hpp_dimension *hd) 3126 { 3127 struct perf_hpp_fmt *fmt; 3128 3129 if (hd->taken) 3130 return 0; 3131 3132 fmt = __hpp_dimension__alloc_hpp(hd, 0); 3133 if (!fmt) 3134 return -1; 3135 3136 hd->taken = 1; 3137 perf_hpp_list__column_register(list, fmt); 3138 return 0; 3139 } 3140 3141 int hpp_dimension__add_output(unsigned col) 3142 { 3143 BUG_ON(col >= PERF_HPP__MAX_INDEX); 3144 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 3145 } 3146 3147 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 3148 struct evlist *evlist, 3149 int level) 3150 { 3151 unsigned int i, j; 3152 3153 /* 3154 * Check to see if there are any arch specific 3155 * sort dimensions not applicable for the current 3156 * architecture. If so, Skip that sort key since 3157 * we don't want to display it in the output fields. 
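	 * The default (weak) arch_support_sort_key() accepts none of them, so
	 * such keys are silently dropped unless the architecture opts in.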
3158 */ 3159 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) { 3160 if (!strcmp(arch_specific_sort_keys[j], tok) && 3161 !arch_support_sort_key(tok)) { 3162 return 0; 3163 } 3164 } 3165 3166 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3167 struct sort_dimension *sd = &common_sort_dimensions[i]; 3168 3169 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3170 continue; 3171 3172 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) { 3173 if (sd->name && !strcmp(dynamic_headers[j], sd->name)) 3174 sort_dimension_add_dynamic_header(sd); 3175 } 3176 3177 if (sd->entry == &sort_parent) { 3178 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 3179 if (ret) { 3180 char err[BUFSIZ]; 3181 3182 regerror(ret, &parent_regex, err, sizeof(err)); 3183 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 3184 return -EINVAL; 3185 } 3186 list->parent = 1; 3187 } else if (sd->entry == &sort_sym) { 3188 list->sym = 1; 3189 /* 3190 * perf diff displays the performance difference amongst 3191 * two or more perf.data files. Those files could come 3192 * from different binaries. So we should not compare 3193 * their ips, but the name of symbol. 3194 */ 3195 if (sort__mode == SORT_MODE__DIFF) 3196 sd->entry->se_collapse = sort__sym_sort; 3197 3198 } else if (sd->entry == &sort_dso) { 3199 list->dso = 1; 3200 } else if (sd->entry == &sort_socket) { 3201 list->socket = 1; 3202 } else if (sd->entry == &sort_thread) { 3203 list->thread = 1; 3204 } else if (sd->entry == &sort_comm) { 3205 list->comm = 1; 3206 } 3207 3208 return __sort_dimension__add(sd, list, level); 3209 } 3210 3211 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3212 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3213 3214 if (strncasecmp(tok, hd->name, strlen(tok))) 3215 continue; 3216 3217 return __hpp_dimension__add(hd, list, level); 3218 } 3219 3220 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3221 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3222 3223 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3224 continue; 3225 3226 if (sort__mode != SORT_MODE__BRANCH) 3227 return -EINVAL; 3228 3229 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 3230 list->sym = 1; 3231 3232 __sort_dimension__add(sd, list, level); 3233 return 0; 3234 } 3235 3236 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3237 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3238 3239 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3240 continue; 3241 3242 if (sort__mode != SORT_MODE__MEMORY) 3243 return -EINVAL; 3244 3245 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 3246 return -EINVAL; 3247 3248 if (sd->entry == &sort_mem_daddr_sym) 3249 list->sym = 1; 3250 3251 __sort_dimension__add(sd, list, level); 3252 return 0; 3253 } 3254 3255 if (!add_dynamic_entry(evlist, tok, level)) 3256 return 0; 3257 3258 return -ESRCH; 3259 } 3260 3261 static int setup_sort_list(struct perf_hpp_list *list, char *str, 3262 struct evlist *evlist) 3263 { 3264 char *tmp, *tok; 3265 int ret = 0; 3266 int level = 0; 3267 int next_level = 1; 3268 bool in_group = false; 3269 3270 do { 3271 tok = str; 3272 tmp = strpbrk(str, "{}, "); 3273 if (tmp) { 3274 if (in_group) 3275 next_level = level; 3276 else 3277 next_level = level + 1; 3278 3279 if (*tmp == '{') 3280 in_group = true; 3281 else if (*tmp == '}') 3282 in_group = false; 3283 3284 *tmp = '\0'; 3285 str = tmp + 1; 3286 } 3287 3288 if (*tok) { 3289 ret = sort_dimension__add(list, tok, evlist, level); 
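			/* -EINVAL: the key exists but can't be used here; -ESRCH: no such key. */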
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
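	 * When --fields does not force a strict order, "overhead" (and
	 * "overhead_children" when callchains are accumulated) is prepended
	 * to the sort keys; perf diff skips this.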
3434 */ 3435 if (!is_strict_order(field_order)) { 3436 str = setup_overhead(str); 3437 if (str == NULL) { 3438 pr_err("Not enough memory to setup overhead keys"); 3439 return -ENOMEM; 3440 } 3441 } 3442 3443 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3444 3445 free(str); 3446 return ret; 3447 } 3448 3449 void perf_hpp__set_elide(int idx, bool elide) 3450 { 3451 struct perf_hpp_fmt *fmt; 3452 struct hpp_sort_entry *hse; 3453 3454 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3455 if (!perf_hpp__is_sort_entry(fmt)) 3456 continue; 3457 3458 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3459 if (hse->se->se_width_idx == idx) { 3460 fmt->elide = elide; 3461 break; 3462 } 3463 } 3464 } 3465 3466 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3467 { 3468 if (list && strlist__nr_entries(list) == 1) { 3469 if (fp != NULL) 3470 fprintf(fp, "# %s: %s\n", list_name, 3471 strlist__entry(list, 0)->s); 3472 return true; 3473 } 3474 return false; 3475 } 3476 3477 static bool get_elide(int idx, FILE *output) 3478 { 3479 switch (idx) { 3480 case HISTC_SYMBOL: 3481 return __get_elide(symbol_conf.sym_list, "symbol", output); 3482 case HISTC_DSO: 3483 return __get_elide(symbol_conf.dso_list, "dso", output); 3484 case HISTC_COMM: 3485 return __get_elide(symbol_conf.comm_list, "comm", output); 3486 default: 3487 break; 3488 } 3489 3490 if (sort__mode != SORT_MODE__BRANCH) 3491 return false; 3492 3493 switch (idx) { 3494 case HISTC_SYMBOL_FROM: 3495 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3496 case HISTC_SYMBOL_TO: 3497 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3498 case HISTC_DSO_FROM: 3499 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3500 case HISTC_DSO_TO: 3501 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3502 case HISTC_ADDR_FROM: 3503 return __get_elide(symbol_conf.sym_from_list, "addr_from", output); 3504 case HISTC_ADDR_TO: 3505 return __get_elide(symbol_conf.sym_to_list, "addr_to", output); 3506 default: 3507 break; 3508 } 3509 3510 return false; 3511 } 3512 3513 void sort__setup_elide(FILE *output) 3514 { 3515 struct perf_hpp_fmt *fmt; 3516 struct hpp_sort_entry *hse; 3517 3518 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3519 if (!perf_hpp__is_sort_entry(fmt)) 3520 continue; 3521 3522 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3523 fmt->elide = get_elide(hse->se->se_width_idx, output); 3524 } 3525 3526 /* 3527 * It makes no sense to elide all of sort entries. 3528 * Just revert them to show up again. 
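	 * (with every sort column hidden there would be nothing left to tell
	 * the entries apart)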
3529 */ 3530 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3531 if (!perf_hpp__is_sort_entry(fmt)) 3532 continue; 3533 3534 if (!fmt->elide) 3535 return; 3536 } 3537 3538 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3539 if (!perf_hpp__is_sort_entry(fmt)) 3540 continue; 3541 3542 fmt->elide = false; 3543 } 3544 } 3545 3546 int output_field_add(struct perf_hpp_list *list, char *tok) 3547 { 3548 unsigned int i; 3549 3550 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3551 struct sort_dimension *sd = &common_sort_dimensions[i]; 3552 3553 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3554 continue; 3555 3556 return __sort_dimension__add_output(list, sd); 3557 } 3558 3559 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3560 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3561 3562 if (strncasecmp(tok, hd->name, strlen(tok))) 3563 continue; 3564 3565 return __hpp_dimension__add_output(list, hd); 3566 } 3567 3568 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3569 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3570 3571 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3572 continue; 3573 3574 if (sort__mode != SORT_MODE__BRANCH) 3575 return -EINVAL; 3576 3577 return __sort_dimension__add_output(list, sd); 3578 } 3579 3580 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3581 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3582 3583 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok))) 3584 continue; 3585 3586 if (sort__mode != SORT_MODE__MEMORY) 3587 return -EINVAL; 3588 3589 return __sort_dimension__add_output(list, sd); 3590 } 3591 3592 return -ESRCH; 3593 } 3594 3595 static int setup_output_list(struct perf_hpp_list *list, char *str) 3596 { 3597 char *tmp, *tok; 3598 int ret = 0; 3599 3600 for (tok = strtok_r(str, ", ", &tmp); 3601 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3602 ret = output_field_add(list, tok); 3603 if (ret == -EINVAL) { 3604 ui__error("Invalid --fields key: `%s'", tok); 3605 break; 3606 } else if (ret == -ESRCH) { 3607 ui__error("Unknown --fields key: `%s'", tok); 3608 break; 3609 } 3610 } 3611 3612 return ret; 3613 } 3614 3615 void reset_dimensions(void) 3616 { 3617 unsigned int i; 3618 3619 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3620 common_sort_dimensions[i].taken = 0; 3621 3622 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3623 hpp_sort_dimensions[i].taken = 0; 3624 3625 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3626 bstack_sort_dimensions[i].taken = 0; 3627 3628 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3629 memory_sort_dimensions[i].taken = 0; 3630 } 3631 3632 bool is_strict_order(const char *order) 3633 { 3634 return order && (*order != '+'); 3635 } 3636 3637 static int __setup_output_field(void) 3638 { 3639 char *str, *strp; 3640 int ret = -EINVAL; 3641 3642 if (field_order == NULL) 3643 return 0; 3644 3645 strp = str = strdup(field_order); 3646 if (str == NULL) { 3647 pr_err("Not enough memory to setup output fields"); 3648 return -ENOMEM; 3649 } 3650 3651 if (!is_strict_order(field_order)) 3652 strp++; 3653 3654 if (!strlen(strp)) { 3655 ui__error("Invalid --fields key: `+'"); 3656 goto out; 3657 } 3658 3659 ret = setup_output_list(&perf_hpp_list, strp); 3660 3661 out: 3662 free(str); 3663 return ret; 3664 } 3665 3666 int setup_sorting(struct evlist *evlist) 3667 { 3668 int err; 3669 3670 err = __setup_sorting(evlist); 3671 if (err < 0) 3672 return err; 3673 3674 if (parent_pattern != default_parent_pattern) 
{ 3675 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3676 if (err < 0) 3677 return err; 3678 } 3679 3680 reset_dimensions(); 3681 3682 /* 3683 * perf diff doesn't use default hpp output fields. 3684 */ 3685 if (sort__mode != SORT_MODE__DIFF) 3686 perf_hpp__init(); 3687 3688 err = __setup_output_field(); 3689 if (err < 0) 3690 return err; 3691 3692 /* copy sort keys to output fields */ 3693 perf_hpp__setup_output_field(&perf_hpp_list); 3694 /* and then copy output fields to sort keys */ 3695 perf_hpp__append_sort_keys(&perf_hpp_list); 3696 3697 /* setup hists-specific output fields */ 3698 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3699 return -1; 3700 3701 return 0; 3702 } 3703 3704 void reset_output_field(void) 3705 { 3706 perf_hpp_list.need_collapse = 0; 3707 perf_hpp_list.parent = 0; 3708 perf_hpp_list.sym = 0; 3709 perf_hpp_list.dso = 0; 3710 3711 field_order = NULL; 3712 sort_order = NULL; 3713 3714 reset_dimensions(); 3715 perf_hpp__reset_output_field(&perf_hpp_list); 3716 } 3717 3718 #define INDENT (3*8 + 1) 3719 3720 static void add_key(struct strbuf *sb, const char *str, int *llen) 3721 { 3722 if (!str) 3723 return; 3724 3725 if (*llen >= 75) { 3726 strbuf_addstr(sb, "\n\t\t\t "); 3727 *llen = INDENT; 3728 } 3729 strbuf_addf(sb, " %s", str); 3730 *llen += strlen(str) + 1; 3731 } 3732 3733 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3734 int *llen) 3735 { 3736 int i; 3737 3738 for (i = 0; i < n; i++) 3739 add_key(sb, s[i].name, llen); 3740 } 3741 3742 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3743 int *llen) 3744 { 3745 int i; 3746 3747 for (i = 0; i < n; i++) 3748 add_key(sb, s[i].name, llen); 3749 } 3750 3751 char *sort_help(const char *prefix) 3752 { 3753 struct strbuf sb; 3754 char *s; 3755 int len = strlen(prefix) + INDENT; 3756 3757 strbuf_init(&sb, 300); 3758 strbuf_addstr(&sb, prefix); 3759 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3760 ARRAY_SIZE(hpp_sort_dimensions), &len); 3761 add_sort_string(&sb, common_sort_dimensions, 3762 ARRAY_SIZE(common_sort_dimensions), &len); 3763 add_sort_string(&sb, bstack_sort_dimensions, 3764 ARRAY_SIZE(bstack_sort_dimensions), &len); 3765 add_sort_string(&sb, memory_sort_dimensions, 3766 ARRAY_SIZE(memory_sort_dimensions), &len); 3767 s = strbuf_detach(&sb, NULL); 3768 strbuf_release(&sb); 3769 return s; 3770 } 3771