1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <stdlib.h> 6 #include <linux/mman.h> 7 #include <linux/time64.h> 8 #include "debug.h" 9 #include "dso.h" 10 #include "sort.h" 11 #include "hist.h" 12 #include "cacheline.h" 13 #include "comm.h" 14 #include "map.h" 15 #include "maps.h" 16 #include "symbol.h" 17 #include "map_symbol.h" 18 #include "branch.h" 19 #include "thread.h" 20 #include "evsel.h" 21 #include "evlist.h" 22 #include "srcline.h" 23 #include "strlist.h" 24 #include "strbuf.h" 25 #include <traceevent/event-parse.h> 26 #include "mem-events.h" 27 #include "annotate.h" 28 #include "time-utils.h" 29 #include "cgroup.h" 30 #include "machine.h" 31 #include <linux/kernel.h> 32 #include <linux/string.h> 33 34 regex_t parent_regex; 35 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 36 const char *parent_pattern = default_parent_pattern; 37 const char *default_sort_order = "comm,dso,symbol"; 38 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 39 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat"; 40 const char default_top_sort_order[] = "dso,symbol"; 41 const char default_diff_sort_order[] = "dso,symbol"; 42 const char default_tracepoint_sort_order[] = "trace"; 43 const char *sort_order; 44 const char *field_order; 45 regex_t ignore_callees_regex; 46 int have_ignore_callees = 0; 47 enum sort_mode sort__mode = SORT_MODE__NORMAL; 48 49 /* 50 * Replaces all occurrences of a char used with the: 51 * 52 * -t, --field-separator 53 * 54 * option, that uses a special separator character and don't pad with spaces, 55 * replacing all occurrences of this separator in symbol names (and other 56 * output) with a '.' character, that thus it's the only non valid separator. 57 */ 58 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 59 { 60 int n; 61 va_list ap; 62 63 va_start(ap, fmt); 64 n = vsnprintf(bf, size, fmt, ap); 65 if (symbol_conf.field_sep && n > 0) { 66 char *sep = bf; 67 68 while (1) { 69 sep = strchr(sep, *symbol_conf.field_sep); 70 if (sep == NULL) 71 break; 72 *sep = '.'; 73 } 74 } 75 va_end(ap); 76 77 if (n >= (int)size) 78 return size - 1; 79 return n; 80 } 81 82 static int64_t cmp_null(const void *l, const void *r) 83 { 84 if (!l && !r) 85 return 0; 86 else if (!l) 87 return -1; 88 else 89 return 1; 90 } 91 92 /* --sort pid */ 93 94 static int64_t 95 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 96 { 97 return right->thread->tid - left->thread->tid; 98 } 99 100 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 101 size_t size, unsigned int width) 102 { 103 const char *comm = thread__comm_str(he->thread); 104 105 width = max(7U, width) - 8; 106 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 107 width, width, comm ?: ""); 108 } 109 110 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 111 { 112 const struct thread *th = arg; 113 114 if (type != HIST_FILTER__THREAD) 115 return -1; 116 117 return th && he->thread != th; 118 } 119 120 struct sort_entry sort_thread = { 121 .se_header = " Pid:Command", 122 .se_cmp = sort__thread_cmp, 123 .se_snprintf = hist_entry__thread_snprintf, 124 .se_filter = hist_entry__thread_filter, 125 .se_width_idx = HISTC_THREAD, 126 }; 127 128 /* --sort comm */ 129 130 /* 131 * We can't use pointer comparison in functions below, 132 * because it gives different results based on pointer 133 * values, which could break some sorting assumptions. 134 */ 135 static int64_t 136 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 137 { 138 return strcmp(comm__str(right->comm), comm__str(left->comm)); 139 } 140 141 static int64_t 142 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 143 { 144 return strcmp(comm__str(right->comm), comm__str(left->comm)); 145 } 146 147 static int64_t 148 sort__comm_sort(struct hist_entry *left, struct hist_entry *right) 149 { 150 return strcmp(comm__str(right->comm), comm__str(left->comm)); 151 } 152 153 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 154 size_t size, unsigned int width) 155 { 156 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 157 } 158 159 struct sort_entry sort_comm = { 160 .se_header = "Command", 161 .se_cmp = sort__comm_cmp, 162 .se_collapse = sort__comm_collapse, 163 .se_sort = sort__comm_sort, 164 .se_snprintf = hist_entry__comm_snprintf, 165 .se_filter = hist_entry__thread_filter, 166 .se_width_idx = HISTC_COMM, 167 }; 168 169 /* --sort dso */ 170 171 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 172 { 173 struct dso *dso_l = map_l ? map_l->dso : NULL; 174 struct dso *dso_r = map_r ? map_r->dso : NULL; 175 const char *dso_name_l, *dso_name_r; 176 177 if (!dso_l || !dso_r) 178 return cmp_null(dso_r, dso_l); 179 180 if (verbose > 0) { 181 dso_name_l = dso_l->long_name; 182 dso_name_r = dso_r->long_name; 183 } else { 184 dso_name_l = dso_l->short_name; 185 dso_name_r = dso_r->short_name; 186 } 187 188 return strcmp(dso_name_l, dso_name_r); 189 } 190 191 static int64_t 192 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 193 { 194 return _sort__dso_cmp(right->ms.map, left->ms.map); 195 } 196 197 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 198 size_t size, unsigned int width) 199 { 200 if (map && map->dso) { 201 const char *dso_name = verbose > 0 ? map->dso->long_name : 202 map->dso->short_name; 203 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 204 } 205 206 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 207 } 208 209 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 210 size_t size, unsigned int width) 211 { 212 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 213 } 214 215 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 216 { 217 const struct dso *dso = arg; 218 219 if (type != HIST_FILTER__DSO) 220 return -1; 221 222 return dso && (!he->ms.map || he->ms.map->dso != dso); 223 } 224 225 struct sort_entry sort_dso = { 226 .se_header = "Shared Object", 227 .se_cmp = sort__dso_cmp, 228 .se_snprintf = hist_entry__dso_snprintf, 229 .se_filter = hist_entry__dso_filter, 230 .se_width_idx = HISTC_DSO, 231 }; 232 233 /* --sort symbol */ 234 235 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 236 { 237 return (int64_t)(right_ip - left_ip); 238 } 239 240 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 241 { 242 if (!sym_l || !sym_r) 243 return cmp_null(sym_l, sym_r); 244 245 if (sym_l == sym_r) 246 return 0; 247 248 if (sym_l->inlined || sym_r->inlined) { 249 int ret = strcmp(sym_l->name, sym_r->name); 250 251 if (ret) 252 return ret; 253 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start)) 254 return 0; 255 } 256 257 if (sym_l->start != sym_r->start) 258 return (int64_t)(sym_r->start - sym_l->start); 259 260 return (int64_t)(sym_r->end - sym_l->end); 261 } 262 263 static int64_t 264 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 265 { 266 int64_t ret; 267 268 if (!left->ms.sym && !right->ms.sym) 269 return _sort__addr_cmp(left->ip, right->ip); 270 271 /* 272 * comparing symbol address alone is not enough since it's a 273 * relative address within a dso. 274 */ 275 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 276 ret = sort__dso_cmp(left, right); 277 if (ret != 0) 278 return ret; 279 } 280 281 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 282 } 283 284 static int64_t 285 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 286 { 287 if (!left->ms.sym || !right->ms.sym) 288 return cmp_null(left->ms.sym, right->ms.sym); 289 290 return strcmp(right->ms.sym->name, left->ms.sym->name); 291 } 292 293 static int _hist_entry__sym_snprintf(struct map_symbol *ms, 294 u64 ip, char level, char *bf, size_t size, 295 unsigned int width) 296 { 297 struct symbol *sym = ms->sym; 298 struct map *map = ms->map; 299 size_t ret = 0; 300 301 if (verbose > 0) { 302 char o = map ? dso__symtab_origin(map->dso) : '!'; 303 u64 rip = ip; 304 305 if (map && map->dso && map->dso->kernel 306 && map->dso->adjust_symbols) 307 rip = map->unmap_ip(map, ip); 308 309 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 310 BITS_PER_LONG / 4 + 2, rip, o); 311 } 312 313 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 314 if (sym && map) { 315 if (sym->type == STT_OBJECT) { 316 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 317 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 318 ip - map->unmap_ip(map, sym->start)); 319 } else { 320 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 321 width - ret, 322 sym->name); 323 if (sym->inlined) 324 ret += repsep_snprintf(bf + ret, size - ret, 325 " (inlined)"); 326 } 327 } else { 328 size_t len = BITS_PER_LONG / 4; 329 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 330 len, ip); 331 } 332 333 return ret; 334 } 335 336 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) 337 { 338 return _hist_entry__sym_snprintf(&he->ms, he->ip, 339 he->level, bf, size, width); 340 } 341 342 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 343 { 344 const char *sym = arg; 345 346 if (type != HIST_FILTER__SYMBOL) 347 return -1; 348 349 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 350 } 351 352 struct sort_entry sort_sym = { 353 .se_header = "Symbol", 354 .se_cmp = sort__sym_cmp, 355 .se_sort = sort__sym_sort, 356 .se_snprintf = hist_entry__sym_snprintf, 357 .se_filter = hist_entry__sym_filter, 358 .se_width_idx = HISTC_SYMBOL, 359 }; 360 361 /* --sort srcline */ 362 363 char *hist_entry__srcline(struct hist_entry *he) 364 { 365 return map__srcline(he->ms.map, he->ip, he->ms.sym); 366 } 367 368 static int64_t 369 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 370 { 371 if (!left->srcline) 372 left->srcline = hist_entry__srcline(left); 373 if (!right->srcline) 374 right->srcline = hist_entry__srcline(right); 375 376 return strcmp(right->srcline, left->srcline); 377 } 378 379 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 380 size_t size, unsigned int width) 381 { 382 if (!he->srcline) 383 he->srcline = hist_entry__srcline(he); 384 385 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 386 } 387 388 struct sort_entry sort_srcline = { 389 .se_header = "Source:Line", 390 .se_cmp = sort__srcline_cmp, 391 .se_snprintf = hist_entry__srcline_snprintf, 392 .se_width_idx = HISTC_SRCLINE, 393 }; 394 395 /* --sort srcline_from */ 396 397 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams) 398 { 399 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym); 400 } 401 402 static int64_t 403 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 404 { 405 if (!left->branch_info->srcline_from) 406 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); 407 408 if (!right->branch_info->srcline_from) 409 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); 410 411 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 412 } 413 414 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 415 size_t size, unsigned int width) 416 { 417 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 418 } 419 420 struct sort_entry sort_srcline_from = { 421 .se_header = "From Source:Line", 422 .se_cmp = sort__srcline_from_cmp, 423 .se_snprintf = hist_entry__srcline_from_snprintf, 424 .se_width_idx = HISTC_SRCLINE_FROM, 425 }; 426 427 /* --sort srcline_to */ 428 429 static int64_t 430 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 431 { 432 if (!left->branch_info->srcline_to) 433 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to); 434 435 if (!right->branch_info->srcline_to) 436 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to); 437 438 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 439 } 440 441 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 442 size_t size, unsigned int width) 443 { 444 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 445 } 446 447 struct sort_entry sort_srcline_to = { 448 .se_header = "To Source:Line", 449 .se_cmp = sort__srcline_to_cmp, 450 .se_snprintf = hist_entry__srcline_to_snprintf, 451 .se_width_idx = HISTC_SRCLINE_TO, 452 }; 453 454 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, 455 size_t size, unsigned int width) 456 { 457 458 struct symbol *sym = he->ms.sym; 459 struct annotation *notes; 460 double ipc = 0.0, coverage = 0.0; 461 char tmp[64]; 462 463 if (!sym) 464 return repsep_snprintf(bf, size, "%-*s", width, "-"); 465 466 notes = symbol__annotation(sym); 467 468 if (notes->hit_cycles) 469 ipc = notes->hit_insn / ((double)notes->hit_cycles); 470 471 if (notes->total_insn) { 472 coverage = notes->cover_insn * 100.0 / 473 ((double)notes->total_insn); 474 } 475 476 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); 477 return repsep_snprintf(bf, size, "%-*s", width, tmp); 478 } 479 480 struct sort_entry sort_sym_ipc = { 481 .se_header = "IPC [IPC Coverage]", 482 .se_cmp = sort__sym_cmp, 483 .se_snprintf = hist_entry__sym_ipc_snprintf, 484 .se_width_idx = HISTC_SYMBOL_IPC, 485 }; 486 487 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he 488 __maybe_unused, 489 char *bf, size_t size, 490 unsigned int width) 491 { 492 char tmp[64]; 493 494 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); 495 return repsep_snprintf(bf, size, "%-*s", width, tmp); 496 } 497 498 struct sort_entry sort_sym_ipc_null = { 499 .se_header = "IPC [IPC Coverage]", 500 .se_cmp = sort__sym_cmp, 501 .se_snprintf = hist_entry__sym_ipc_null_snprintf, 502 .se_width_idx = HISTC_SYMBOL_IPC, 503 }; 504 505 /* --sort srcfile */ 506 507 static char no_srcfile[1]; 508 509 static char *hist_entry__get_srcfile(struct hist_entry *e) 510 { 511 char *sf, *p; 512 struct map *map = e->ms.map; 513 514 if (!map) 515 return no_srcfile; 516 517 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 518 e->ms.sym, false, true, true, e->ip); 519 if (!strcmp(sf, SRCLINE_UNKNOWN)) 520 return no_srcfile; 521 p = strchr(sf, ':'); 522 if (p && *sf) { 523 *p = 0; 524 return sf; 525 } 526 free(sf); 527 return no_srcfile; 528 } 529 530 static int64_t 531 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 532 { 533 if (!left->srcfile) 534 left->srcfile = hist_entry__get_srcfile(left); 535 if (!right->srcfile) 536 right->srcfile = hist_entry__get_srcfile(right); 537 538 return strcmp(right->srcfile, left->srcfile); 539 } 540 541 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 542 size_t size, unsigned int width) 543 { 544 if (!he->srcfile) 545 he->srcfile = hist_entry__get_srcfile(he); 546 547 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 548 } 549 550 struct sort_entry sort_srcfile = { 551 .se_header = "Source File", 552 .se_cmp = sort__srcfile_cmp, 553 .se_snprintf = hist_entry__srcfile_snprintf, 554 .se_width_idx = HISTC_SRCFILE, 555 }; 556 557 /* --sort parent */ 558 559 static int64_t 560 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 561 { 562 struct symbol *sym_l = left->parent; 563 struct symbol *sym_r = right->parent; 564 565 if (!sym_l || !sym_r) 566 return cmp_null(sym_l, sym_r); 567 568 return strcmp(sym_r->name, sym_l->name); 569 } 570 571 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 572 size_t size, unsigned int width) 573 { 574 return repsep_snprintf(bf, size, "%-*.*s", width, width, 575 he->parent ? he->parent->name : "[other]"); 576 } 577 578 struct sort_entry sort_parent = { 579 .se_header = "Parent symbol", 580 .se_cmp = sort__parent_cmp, 581 .se_snprintf = hist_entry__parent_snprintf, 582 .se_width_idx = HISTC_PARENT, 583 }; 584 585 /* --sort cpu */ 586 587 static int64_t 588 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 589 { 590 return right->cpu - left->cpu; 591 } 592 593 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 594 size_t size, unsigned int width) 595 { 596 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 597 } 598 599 struct sort_entry sort_cpu = { 600 .se_header = "CPU", 601 .se_cmp = sort__cpu_cmp, 602 .se_snprintf = hist_entry__cpu_snprintf, 603 .se_width_idx = HISTC_CPU, 604 }; 605 606 /* --sort cgroup_id */ 607 608 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 609 { 610 return (int64_t)(right_dev - left_dev); 611 } 612 613 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 614 { 615 return (int64_t)(right_ino - left_ino); 616 } 617 618 static int64_t 619 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 620 { 621 int64_t ret; 622 623 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 624 if (ret != 0) 625 return ret; 626 627 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 628 left->cgroup_id.ino); 629 } 630 631 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 632 char *bf, size_t size, 633 unsigned int width __maybe_unused) 634 { 635 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 636 he->cgroup_id.ino); 637 } 638 639 struct sort_entry sort_cgroup_id = { 640 .se_header = "cgroup id (dev/inode)", 641 .se_cmp = sort__cgroup_id_cmp, 642 .se_snprintf = hist_entry__cgroup_id_snprintf, 643 .se_width_idx = HISTC_CGROUP_ID, 644 }; 645 646 /* --sort cgroup */ 647 648 static int64_t 649 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right) 650 { 651 return right->cgroup - left->cgroup; 652 } 653 654 static int hist_entry__cgroup_snprintf(struct hist_entry *he, 655 char *bf, size_t size, 656 unsigned int width __maybe_unused) 657 { 658 const char *cgrp_name = "N/A"; 659 660 if (he->cgroup) { 661 struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env, 662 he->cgroup); 663 if (cgrp != NULL) 664 cgrp_name = cgrp->name; 665 else 666 cgrp_name = "unknown"; 667 } 668 669 return repsep_snprintf(bf, size, "%s", cgrp_name); 670 } 671 672 struct sort_entry sort_cgroup = { 673 .se_header = "Cgroup", 674 .se_cmp = sort__cgroup_cmp, 675 .se_snprintf = hist_entry__cgroup_snprintf, 676 .se_width_idx = HISTC_CGROUP, 677 }; 678 679 /* --sort socket */ 680 681 static int64_t 682 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 683 { 684 return right->socket - left->socket; 685 } 686 687 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 688 size_t size, unsigned int width) 689 { 690 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 691 } 692 693 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 694 { 695 int sk = *(const int *)arg; 696 697 if (type != HIST_FILTER__SOCKET) 698 return -1; 699 700 return sk >= 0 && he->socket != sk; 701 } 702 703 struct sort_entry sort_socket = { 704 .se_header = "Socket", 705 .se_cmp = sort__socket_cmp, 706 .se_snprintf = hist_entry__socket_snprintf, 707 .se_filter = hist_entry__socket_filter, 708 .se_width_idx = HISTC_SOCKET, 709 }; 710 711 /* --sort time */ 712 713 static int64_t 714 sort__time_cmp(struct hist_entry *left, struct hist_entry *right) 715 { 716 return right->time - left->time; 717 } 718 719 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf, 720 size_t size, unsigned int width) 721 { 722 char he_time[32]; 723 724 if (symbol_conf.nanosecs) 725 timestamp__scnprintf_nsec(he->time, he_time, 726 sizeof(he_time)); 727 else 728 timestamp__scnprintf_usec(he->time, he_time, 729 sizeof(he_time)); 730 731 return repsep_snprintf(bf, size, "%-.*s", width, he_time); 732 } 733 734 struct sort_entry sort_time = { 735 .se_header = "Time", 736 .se_cmp = sort__time_cmp, 737 .se_snprintf = hist_entry__time_snprintf, 738 .se_width_idx = HISTC_TIME, 739 }; 740 741 /* --sort trace */ 742 743 static char *get_trace_output(struct hist_entry *he) 744 { 745 struct trace_seq seq; 746 struct evsel *evsel; 747 struct tep_record rec = { 748 .data = he->raw_data, 749 .size = he->raw_size, 750 }; 751 752 evsel = hists_to_evsel(he->hists); 753 754 trace_seq_init(&seq); 755 if (symbol_conf.raw_trace) { 756 tep_print_fields(&seq, he->raw_data, he->raw_size, 757 evsel->tp_format); 758 } else { 759 tep_print_event(evsel->tp_format->tep, 760 &seq, &rec, "%s", TEP_PRINT_INFO); 761 } 762 /* 763 * Trim the buffer, it starts at 4KB and we're not going to 764 * add anything more to this buffer. 765 */ 766 return realloc(seq.buffer, seq.len + 1); 767 } 768 769 static int64_t 770 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 771 { 772 struct evsel *evsel; 773 774 evsel = hists_to_evsel(left->hists); 775 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 776 return 0; 777 778 if (left->trace_output == NULL) 779 left->trace_output = get_trace_output(left); 780 if (right->trace_output == NULL) 781 right->trace_output = get_trace_output(right); 782 783 return strcmp(right->trace_output, left->trace_output); 784 } 785 786 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 787 size_t size, unsigned int width) 788 { 789 struct evsel *evsel; 790 791 evsel = hists_to_evsel(he->hists); 792 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 793 return scnprintf(bf, size, "%-.*s", width, "N/A"); 794 795 if (he->trace_output == NULL) 796 he->trace_output = get_trace_output(he); 797 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 798 } 799 800 struct sort_entry sort_trace = { 801 .se_header = "Trace output", 802 .se_cmp = sort__trace_cmp, 803 .se_snprintf = hist_entry__trace_snprintf, 804 .se_width_idx = HISTC_TRACE, 805 }; 806 807 /* sort keys for branch stacks */ 808 809 static int64_t 810 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 811 { 812 if (!left->branch_info || !right->branch_info) 813 return cmp_null(left->branch_info, right->branch_info); 814 815 return _sort__dso_cmp(left->branch_info->from.ms.map, 816 right->branch_info->from.ms.map); 817 } 818 819 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 820 size_t size, unsigned int width) 821 { 822 if (he->branch_info) 823 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map, 824 bf, size, width); 825 else 826 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 827 } 828 829 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 830 const void *arg) 831 { 832 const struct dso *dso = arg; 833 834 if (type != HIST_FILTER__DSO) 835 return -1; 836 837 return dso && (!he->branch_info || !he->branch_info->from.ms.map || 838 he->branch_info->from.ms.map->dso != dso); 839 } 840 841 static int64_t 842 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 843 { 844 if (!left->branch_info || !right->branch_info) 845 return cmp_null(left->branch_info, right->branch_info); 846 847 return _sort__dso_cmp(left->branch_info->to.ms.map, 848 right->branch_info->to.ms.map); 849 } 850 851 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 852 size_t size, unsigned int width) 853 { 854 if (he->branch_info) 855 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map, 856 bf, size, width); 857 else 858 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 859 } 860 861 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 862 const void *arg) 863 { 864 const struct dso *dso = arg; 865 866 if (type != HIST_FILTER__DSO) 867 return -1; 868 869 return dso && (!he->branch_info || !he->branch_info->to.ms.map || 870 he->branch_info->to.ms.map->dso != dso); 871 } 872 873 static int64_t 874 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 875 { 876 struct addr_map_symbol *from_l = &left->branch_info->from; 877 struct addr_map_symbol *from_r = &right->branch_info->from; 878 879 if (!left->branch_info || !right->branch_info) 880 return cmp_null(left->branch_info, right->branch_info); 881 882 from_l = &left->branch_info->from; 883 from_r = &right->branch_info->from; 884 885 if (!from_l->ms.sym && !from_r->ms.sym) 886 return _sort__addr_cmp(from_l->addr, from_r->addr); 887 888 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym); 889 } 890 891 static int64_t 892 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 893 { 894 struct addr_map_symbol *to_l, *to_r; 895 896 if (!left->branch_info || !right->branch_info) 897 return cmp_null(left->branch_info, right->branch_info); 898 899 to_l = &left->branch_info->to; 900 to_r = &right->branch_info->to; 901 902 if (!to_l->ms.sym && !to_r->ms.sym) 903 return _sort__addr_cmp(to_l->addr, to_r->addr); 904 905 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym); 906 } 907 908 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 909 size_t size, unsigned int width) 910 { 911 if (he->branch_info) { 912 struct addr_map_symbol *from = &he->branch_info->from; 913 914 return _hist_entry__sym_snprintf(&from->ms, from->al_addr, 915 he->level, bf, size, width); 916 } 917 918 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 919 } 920 921 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 922 size_t size, unsigned int width) 923 { 924 if (he->branch_info) { 925 struct addr_map_symbol *to = &he->branch_info->to; 926 927 return _hist_entry__sym_snprintf(&to->ms, to->al_addr, 928 he->level, bf, size, width); 929 } 930 931 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 932 } 933 934 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 935 const void *arg) 936 { 937 const char *sym = arg; 938 939 if (type != HIST_FILTER__SYMBOL) 940 return -1; 941 942 return sym && !(he->branch_info && he->branch_info->from.ms.sym && 943 strstr(he->branch_info->from.ms.sym->name, sym)); 944 } 945 946 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 947 const void *arg) 948 { 949 const char *sym = arg; 950 951 if (type != HIST_FILTER__SYMBOL) 952 return -1; 953 954 return sym && !(he->branch_info && he->branch_info->to.ms.sym && 955 strstr(he->branch_info->to.ms.sym->name, sym)); 956 } 957 958 struct sort_entry sort_dso_from = { 959 .se_header = "Source Shared Object", 960 .se_cmp = sort__dso_from_cmp, 961 .se_snprintf = hist_entry__dso_from_snprintf, 962 .se_filter = hist_entry__dso_from_filter, 963 .se_width_idx = HISTC_DSO_FROM, 964 }; 965 966 struct sort_entry sort_dso_to = { 967 .se_header = "Target Shared Object", 968 .se_cmp = sort__dso_to_cmp, 969 .se_snprintf = hist_entry__dso_to_snprintf, 970 .se_filter = hist_entry__dso_to_filter, 971 .se_width_idx = HISTC_DSO_TO, 972 }; 973 974 struct sort_entry sort_sym_from = { 975 .se_header = "Source Symbol", 976 .se_cmp = sort__sym_from_cmp, 977 .se_snprintf = hist_entry__sym_from_snprintf, 978 .se_filter = hist_entry__sym_from_filter, 979 .se_width_idx = HISTC_SYMBOL_FROM, 980 }; 981 982 struct sort_entry sort_sym_to = { 983 .se_header = "Target Symbol", 984 .se_cmp = sort__sym_to_cmp, 985 .se_snprintf = hist_entry__sym_to_snprintf, 986 .se_filter = hist_entry__sym_to_filter, 987 .se_width_idx = HISTC_SYMBOL_TO, 988 }; 989 990 static int64_t 991 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 992 { 993 unsigned char mp, p; 994 995 if (!left->branch_info || !right->branch_info) 996 return cmp_null(left->branch_info, right->branch_info); 997 998 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 999 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 1000 return mp || p; 1001 } 1002 1003 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 1004 size_t size, unsigned int width){ 1005 static const char *out = "N/A"; 1006 1007 if (he->branch_info) { 1008 if (he->branch_info->flags.predicted) 1009 out = "N"; 1010 else if (he->branch_info->flags.mispred) 1011 out = "Y"; 1012 } 1013 1014 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 1015 } 1016 1017 static int64_t 1018 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 1019 { 1020 if (!left->branch_info || !right->branch_info) 1021 return cmp_null(left->branch_info, right->branch_info); 1022 1023 return left->branch_info->flags.cycles - 1024 right->branch_info->flags.cycles; 1025 } 1026 1027 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 1028 size_t size, unsigned int width) 1029 { 1030 if (!he->branch_info) 1031 return scnprintf(bf, size, "%-.*s", width, "N/A"); 1032 if (he->branch_info->flags.cycles == 0) 1033 return repsep_snprintf(bf, size, "%-*s", width, "-"); 1034 return repsep_snprintf(bf, size, "%-*hd", width, 1035 he->branch_info->flags.cycles); 1036 } 1037 1038 struct sort_entry sort_cycles = { 1039 .se_header = "Basic Block Cycles", 1040 .se_cmp = sort__cycles_cmp, 1041 .se_snprintf = hist_entry__cycles_snprintf, 1042 .se_width_idx = HISTC_CYCLES, 1043 }; 1044 1045 /* --sort daddr_sym */ 1046 int64_t 1047 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1048 { 1049 uint64_t l = 0, r = 0; 1050 1051 if (left->mem_info) 1052 l = left->mem_info->daddr.addr; 1053 if (right->mem_info) 1054 r = right->mem_info->daddr.addr; 1055 1056 return (int64_t)(r - l); 1057 } 1058 1059 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 1060 size_t size, unsigned int width) 1061 { 1062 uint64_t addr = 0; 1063 struct map_symbol *ms = NULL; 1064 1065 if (he->mem_info) { 1066 addr = he->mem_info->daddr.addr; 1067 ms = &he->mem_info->daddr.ms; 1068 } 1069 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1070 } 1071 1072 int64_t 1073 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 1074 { 1075 uint64_t l = 0, r = 0; 1076 1077 if (left->mem_info) 1078 l = left->mem_info->iaddr.addr; 1079 if (right->mem_info) 1080 r = right->mem_info->iaddr.addr; 1081 1082 return (int64_t)(r - l); 1083 } 1084 1085 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 1086 size_t size, unsigned int width) 1087 { 1088 uint64_t addr = 0; 1089 struct map_symbol *ms = NULL; 1090 1091 if (he->mem_info) { 1092 addr = he->mem_info->iaddr.addr; 1093 ms = &he->mem_info->iaddr.ms; 1094 } 1095 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width); 1096 } 1097 1098 static int64_t 1099 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1100 { 1101 struct map *map_l = NULL; 1102 struct map *map_r = NULL; 1103 1104 if (left->mem_info) 1105 map_l = left->mem_info->daddr.ms.map; 1106 if (right->mem_info) 1107 map_r = right->mem_info->daddr.ms.map; 1108 1109 return _sort__dso_cmp(map_l, map_r); 1110 } 1111 1112 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1113 size_t size, unsigned int width) 1114 { 1115 struct map *map = NULL; 1116 1117 if (he->mem_info) 1118 map = he->mem_info->daddr.ms.map; 1119 1120 return _hist_entry__dso_snprintf(map, bf, size, width); 1121 } 1122 1123 static int64_t 1124 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1125 { 1126 union perf_mem_data_src data_src_l; 1127 union perf_mem_data_src data_src_r; 1128 1129 if (left->mem_info) 1130 data_src_l = left->mem_info->data_src; 1131 else 1132 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1133 1134 if (right->mem_info) 1135 data_src_r = right->mem_info->data_src; 1136 else 1137 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1138 1139 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1140 } 1141 1142 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1143 size_t size, unsigned int width) 1144 { 1145 char out[10]; 1146 1147 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1148 return repsep_snprintf(bf, size, "%.*s", width, out); 1149 } 1150 1151 static int64_t 1152 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1153 { 1154 union perf_mem_data_src data_src_l; 1155 union perf_mem_data_src data_src_r; 1156 1157 if (left->mem_info) 1158 data_src_l = left->mem_info->data_src; 1159 else 1160 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1161 1162 if (right->mem_info) 1163 data_src_r = right->mem_info->data_src; 1164 else 1165 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1166 1167 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1168 } 1169 1170 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1171 size_t size, unsigned int width) 1172 { 1173 char out[64]; 1174 1175 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1176 return repsep_snprintf(bf, size, "%-*s", width, out); 1177 } 1178 1179 static int64_t 1180 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1181 { 1182 union perf_mem_data_src data_src_l; 1183 union perf_mem_data_src data_src_r; 1184 1185 if (left->mem_info) 1186 data_src_l = left->mem_info->data_src; 1187 else 1188 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1189 1190 if (right->mem_info) 1191 data_src_r = right->mem_info->data_src; 1192 else 1193 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1194 1195 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1196 } 1197 1198 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1199 size_t size, unsigned int width) 1200 { 1201 char out[64]; 1202 1203 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1204 return repsep_snprintf(bf, size, "%-*s", width, out); 1205 } 1206 1207 static int64_t 1208 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1209 { 1210 union perf_mem_data_src data_src_l; 1211 union perf_mem_data_src data_src_r; 1212 1213 if (left->mem_info) 1214 data_src_l = left->mem_info->data_src; 1215 else 1216 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1217 1218 if (right->mem_info) 1219 data_src_r = right->mem_info->data_src; 1220 else 1221 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1222 1223 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1224 } 1225 1226 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1227 size_t size, unsigned int width) 1228 { 1229 char out[64]; 1230 1231 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1232 return repsep_snprintf(bf, size, "%-*s", width, out); 1233 } 1234 1235 int64_t 1236 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1237 { 1238 u64 l, r; 1239 struct map *l_map, *r_map; 1240 int rc; 1241 1242 if (!left->mem_info) return -1; 1243 if (!right->mem_info) return 1; 1244 1245 /* group event types together */ 1246 if (left->cpumode > right->cpumode) return -1; 1247 if (left->cpumode < right->cpumode) return 1; 1248 1249 l_map = left->mem_info->daddr.ms.map; 1250 r_map = right->mem_info->daddr.ms.map; 1251 1252 /* if both are NULL, jump to sort on al_addr instead */ 1253 if (!l_map && !r_map) 1254 goto addr; 1255 1256 if (!l_map) return -1; 1257 if (!r_map) return 1; 1258 1259 rc = dso__cmp_id(l_map->dso, r_map->dso); 1260 if (rc) 1261 return rc; 1262 /* 1263 * Addresses with no major/minor numbers are assumed to be 1264 * anonymous in userspace. Sort those on pid then address. 1265 * 1266 * The kernel and non-zero major/minor mapped areas are 1267 * assumed to be unity mapped. Sort those on address. 1268 */ 1269 1270 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1271 (!(l_map->flags & MAP_SHARED)) && 1272 !l_map->dso->id.maj && !l_map->dso->id.min && 1273 !l_map->dso->id.ino && !l_map->dso->id.ino_generation) { 1274 /* userspace anonymous */ 1275 1276 if (left->thread->pid_ > right->thread->pid_) return -1; 1277 if (left->thread->pid_ < right->thread->pid_) return 1; 1278 } 1279 1280 addr: 1281 /* al_addr does all the right addr - start + offset calculations */ 1282 l = cl_address(left->mem_info->daddr.al_addr); 1283 r = cl_address(right->mem_info->daddr.al_addr); 1284 1285 if (l > r) return -1; 1286 if (l < r) return 1; 1287 1288 return 0; 1289 } 1290 1291 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1292 size_t size, unsigned int width) 1293 { 1294 1295 uint64_t addr = 0; 1296 struct map_symbol *ms = NULL; 1297 char level = he->level; 1298 1299 if (he->mem_info) { 1300 struct map *map = he->mem_info->daddr.ms.map; 1301 1302 addr = cl_address(he->mem_info->daddr.al_addr); 1303 ms = &he->mem_info->daddr.ms; 1304 1305 /* print [s] for shared data mmaps */ 1306 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1307 map && !(map->prot & PROT_EXEC) && 1308 (map->flags & MAP_SHARED) && 1309 (map->dso->id.maj || map->dso->id.min || 1310 map->dso->id.ino || map->dso->id.ino_generation)) 1311 level = 's'; 1312 else if (!map) 1313 level = 'X'; 1314 } 1315 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width); 1316 } 1317 1318 struct sort_entry sort_mispredict = { 1319 .se_header = "Branch Mispredicted", 1320 .se_cmp = sort__mispredict_cmp, 1321 .se_snprintf = hist_entry__mispredict_snprintf, 1322 .se_width_idx = HISTC_MISPREDICT, 1323 }; 1324 1325 static u64 he_weight(struct hist_entry *he) 1326 { 1327 return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0; 1328 } 1329 1330 static int64_t 1331 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1332 { 1333 return he_weight(left) - he_weight(right); 1334 } 1335 1336 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1337 size_t size, unsigned int width) 1338 { 1339 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1340 } 1341 1342 struct sort_entry sort_local_weight = { 1343 .se_header = "Local Weight", 1344 .se_cmp = sort__local_weight_cmp, 1345 .se_snprintf = hist_entry__local_weight_snprintf, 1346 .se_width_idx = HISTC_LOCAL_WEIGHT, 1347 }; 1348 1349 static int64_t 1350 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1351 { 1352 return left->stat.weight - right->stat.weight; 1353 } 1354 1355 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1356 size_t size, unsigned int width) 1357 { 1358 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1359 } 1360 1361 struct sort_entry sort_global_weight = { 1362 .se_header = "Weight", 1363 .se_cmp = sort__global_weight_cmp, 1364 .se_snprintf = hist_entry__global_weight_snprintf, 1365 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1366 }; 1367 1368 static u64 he_ins_lat(struct hist_entry *he) 1369 { 1370 return he->stat.nr_events ? he->stat.ins_lat / he->stat.nr_events : 0; 1371 } 1372 1373 static int64_t 1374 sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) 1375 { 1376 return he_ins_lat(left) - he_ins_lat(right); 1377 } 1378 1379 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf, 1380 size_t size, unsigned int width) 1381 { 1382 return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he)); 1383 } 1384 1385 struct sort_entry sort_local_ins_lat = { 1386 .se_header = "Local INSTR Latency", 1387 .se_cmp = sort__local_ins_lat_cmp, 1388 .se_snprintf = hist_entry__local_ins_lat_snprintf, 1389 .se_width_idx = HISTC_LOCAL_INS_LAT, 1390 }; 1391 1392 static int64_t 1393 sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right) 1394 { 1395 return left->stat.ins_lat - right->stat.ins_lat; 1396 } 1397 1398 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf, 1399 size_t size, unsigned int width) 1400 { 1401 return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat); 1402 } 1403 1404 struct sort_entry sort_global_ins_lat = { 1405 .se_header = "INSTR Latency", 1406 .se_cmp = sort__global_ins_lat_cmp, 1407 .se_snprintf = hist_entry__global_ins_lat_snprintf, 1408 .se_width_idx = HISTC_GLOBAL_INS_LAT, 1409 }; 1410 1411 struct sort_entry sort_mem_daddr_sym = { 1412 .se_header = "Data Symbol", 1413 .se_cmp = sort__daddr_cmp, 1414 .se_snprintf = hist_entry__daddr_snprintf, 1415 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1416 }; 1417 1418 struct sort_entry sort_mem_iaddr_sym = { 1419 .se_header = "Code Symbol", 1420 .se_cmp = sort__iaddr_cmp, 1421 .se_snprintf = hist_entry__iaddr_snprintf, 1422 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1423 }; 1424 1425 struct sort_entry sort_mem_daddr_dso = { 1426 .se_header = "Data Object", 1427 .se_cmp = sort__dso_daddr_cmp, 1428 .se_snprintf = hist_entry__dso_daddr_snprintf, 1429 .se_width_idx = HISTC_MEM_DADDR_DSO, 1430 }; 1431 1432 struct sort_entry sort_mem_locked = { 1433 .se_header = "Locked", 1434 .se_cmp = sort__locked_cmp, 1435 .se_snprintf = hist_entry__locked_snprintf, 1436 .se_width_idx = HISTC_MEM_LOCKED, 1437 }; 1438 1439 struct sort_entry sort_mem_tlb = { 1440 .se_header = "TLB access", 1441 .se_cmp = sort__tlb_cmp, 1442 .se_snprintf = hist_entry__tlb_snprintf, 1443 .se_width_idx = HISTC_MEM_TLB, 1444 }; 1445 1446 struct sort_entry sort_mem_lvl = { 1447 .se_header = "Memory access", 1448 .se_cmp = sort__lvl_cmp, 1449 .se_snprintf = hist_entry__lvl_snprintf, 1450 .se_width_idx = HISTC_MEM_LVL, 1451 }; 1452 1453 struct sort_entry sort_mem_snoop = { 1454 .se_header = "Snoop", 1455 .se_cmp = sort__snoop_cmp, 1456 .se_snprintf = hist_entry__snoop_snprintf, 1457 .se_width_idx = HISTC_MEM_SNOOP, 1458 }; 1459 1460 struct sort_entry sort_mem_dcacheline = { 1461 .se_header = "Data Cacheline", 1462 .se_cmp = sort__dcacheline_cmp, 1463 .se_snprintf = hist_entry__dcacheline_snprintf, 1464 .se_width_idx = HISTC_MEM_DCACHELINE, 1465 }; 1466 1467 static int64_t 1468 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right) 1469 { 1470 union perf_mem_data_src data_src_l; 1471 union perf_mem_data_src data_src_r; 1472 1473 if (left->mem_info) 1474 data_src_l = left->mem_info->data_src; 1475 else 1476 data_src_l.mem_blk = PERF_MEM_BLK_NA; 1477 1478 if (right->mem_info) 1479 data_src_r = right->mem_info->data_src; 1480 else 1481 data_src_r.mem_blk = PERF_MEM_BLK_NA; 1482 1483 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk); 1484 } 1485 1486 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf, 1487 size_t size, unsigned int width) 1488 { 1489 char out[16]; 1490 1491 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info); 1492 return repsep_snprintf(bf, size, "%.*s", width, out); 1493 } 1494 1495 struct sort_entry sort_mem_blocked = { 1496 .se_header = "Blocked", 1497 .se_cmp = sort__blocked_cmp, 1498 .se_snprintf = hist_entry__blocked_snprintf, 1499 .se_width_idx = HISTC_MEM_BLOCKED, 1500 }; 1501 1502 static int64_t 1503 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1504 { 1505 uint64_t l = 0, r = 0; 1506 1507 if (left->mem_info) 1508 l = left->mem_info->daddr.phys_addr; 1509 if (right->mem_info) 1510 r = right->mem_info->daddr.phys_addr; 1511 1512 return (int64_t)(r - l); 1513 } 1514 1515 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1516 size_t size, unsigned int width) 1517 { 1518 uint64_t addr = 0; 1519 size_t ret = 0; 1520 size_t len = BITS_PER_LONG / 4; 1521 1522 addr = he->mem_info->daddr.phys_addr; 1523 1524 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1525 1526 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1527 1528 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1529 1530 if (ret > width) 1531 bf[width] = '\0'; 1532 1533 return width; 1534 } 1535 1536 struct sort_entry sort_mem_phys_daddr = { 1537 .se_header = "Data Physical Address", 1538 .se_cmp = sort__phys_daddr_cmp, 1539 .se_snprintf = hist_entry__phys_daddr_snprintf, 1540 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1541 }; 1542 1543 static int64_t 1544 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1545 { 1546 uint64_t l = 0, r = 0; 1547 1548 if (left->mem_info) 1549 l = left->mem_info->daddr.data_page_size; 1550 if (right->mem_info) 1551 r = right->mem_info->daddr.data_page_size; 1552 1553 return (int64_t)(r - l); 1554 } 1555 1556 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf, 1557 size_t size, unsigned int width) 1558 { 1559 char str[PAGE_SIZE_NAME_LEN]; 1560 1561 return repsep_snprintf(bf, size, "%-*s", width, 1562 get_page_size_name(he->mem_info->daddr.data_page_size, str)); 1563 } 1564 1565 struct sort_entry sort_mem_data_page_size = { 1566 .se_header = "Data Page Size", 1567 .se_cmp = sort__data_page_size_cmp, 1568 .se_snprintf = hist_entry__data_page_size_snprintf, 1569 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE, 1570 }; 1571 1572 static int64_t 1573 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right) 1574 { 1575 uint64_t l = left->code_page_size; 1576 uint64_t r = right->code_page_size; 1577 1578 return (int64_t)(r - l); 1579 } 1580 1581 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf, 1582 size_t size, unsigned int width) 1583 { 1584 char str[PAGE_SIZE_NAME_LEN]; 1585 1586 return repsep_snprintf(bf, size, "%-*s", width, 1587 get_page_size_name(he->code_page_size, str)); 1588 } 1589 1590 struct sort_entry sort_code_page_size = { 1591 .se_header = "Code Page Size", 1592 .se_cmp = sort__code_page_size_cmp, 1593 .se_snprintf = hist_entry__code_page_size_snprintf, 1594 .se_width_idx = HISTC_CODE_PAGE_SIZE, 1595 }; 1596 1597 static int64_t 1598 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1599 { 1600 if (!left->branch_info || !right->branch_info) 1601 return cmp_null(left->branch_info, right->branch_info); 1602 1603 return left->branch_info->flags.abort != 1604 right->branch_info->flags.abort; 1605 } 1606 1607 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1608 size_t size, unsigned int width) 1609 { 1610 static const char *out = "N/A"; 1611 1612 if (he->branch_info) { 1613 if (he->branch_info->flags.abort) 1614 out = "A"; 1615 else 1616 out = "."; 1617 } 1618 1619 return repsep_snprintf(bf, size, "%-*s", width, out); 1620 } 1621 1622 struct sort_entry sort_abort = { 1623 .se_header = "Transaction abort", 1624 .se_cmp = sort__abort_cmp, 1625 .se_snprintf = hist_entry__abort_snprintf, 1626 .se_width_idx = HISTC_ABORT, 1627 }; 1628 1629 static int64_t 1630 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1631 { 1632 if (!left->branch_info || !right->branch_info) 1633 return cmp_null(left->branch_info, right->branch_info); 1634 1635 return left->branch_info->flags.in_tx != 1636 right->branch_info->flags.in_tx; 1637 } 1638 1639 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1640 size_t size, unsigned int width) 1641 { 1642 static const char *out = "N/A"; 1643 1644 if (he->branch_info) { 1645 if (he->branch_info->flags.in_tx) 1646 out = "T"; 1647 else 1648 out = "."; 1649 } 1650 1651 return repsep_snprintf(bf, size, "%-*s", width, out); 1652 } 1653 1654 struct sort_entry sort_in_tx = { 1655 .se_header = "Branch in transaction", 1656 .se_cmp = sort__in_tx_cmp, 1657 .se_snprintf = hist_entry__in_tx_snprintf, 1658 .se_width_idx = HISTC_IN_TX, 1659 }; 1660 1661 static int64_t 1662 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1663 { 1664 return left->transaction - right->transaction; 1665 } 1666 1667 static inline char *add_str(char *p, const char *str) 1668 { 1669 strcpy(p, str); 1670 return p + strlen(str); 1671 } 1672 1673 static struct txbit { 1674 unsigned flag; 1675 const char *name; 1676 int skip_for_len; 1677 } txbits[] = { 1678 { PERF_TXN_ELISION, "EL ", 0 }, 1679 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1680 { PERF_TXN_SYNC, "SYNC ", 1 }, 1681 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1682 { PERF_TXN_RETRY, "RETRY ", 0 }, 1683 { PERF_TXN_CONFLICT, "CON ", 0 }, 1684 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1685 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1686 { 0, NULL, 0 } 1687 }; 1688 1689 int hist_entry__transaction_len(void) 1690 { 1691 int i; 1692 int len = 0; 1693 1694 for (i = 0; txbits[i].name; i++) { 1695 if (!txbits[i].skip_for_len) 1696 len += strlen(txbits[i].name); 1697 } 1698 len += 4; /* :XX<space> */ 1699 return len; 1700 } 1701 1702 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1703 size_t size, unsigned int width) 1704 { 1705 u64 t = he->transaction; 1706 char buf[128]; 1707 char *p = buf; 1708 int i; 1709 1710 buf[0] = 0; 1711 for (i = 0; txbits[i].name; i++) 1712 if (txbits[i].flag & t) 1713 p = add_str(p, txbits[i].name); 1714 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1715 p = add_str(p, "NEITHER "); 1716 if (t & PERF_TXN_ABORT_MASK) { 1717 sprintf(p, ":%" PRIx64, 1718 (t & PERF_TXN_ABORT_MASK) >> 1719 PERF_TXN_ABORT_SHIFT); 1720 p += strlen(p); 1721 } 1722 1723 return repsep_snprintf(bf, size, "%-*s", width, buf); 1724 } 1725 1726 struct sort_entry sort_transaction = { 1727 .se_header = "Transaction ", 1728 .se_cmp = sort__transaction_cmp, 1729 .se_snprintf = hist_entry__transaction_snprintf, 1730 .se_width_idx = HISTC_TRANSACTION, 1731 }; 1732 1733 /* --sort symbol_size */ 1734 1735 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1736 { 1737 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1738 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1739 1740 return size_l < size_r ? -1 : 1741 size_l == size_r ? 0 : 1; 1742 } 1743 1744 static int64_t 1745 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1746 { 1747 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1748 } 1749 1750 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1751 size_t bf_size, unsigned int width) 1752 { 1753 if (sym) 1754 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1755 1756 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1757 } 1758 1759 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1760 size_t size, unsigned int width) 1761 { 1762 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1763 } 1764 1765 struct sort_entry sort_sym_size = { 1766 .se_header = "Symbol size", 1767 .se_cmp = sort__sym_size_cmp, 1768 .se_snprintf = hist_entry__sym_size_snprintf, 1769 .se_width_idx = HISTC_SYM_SIZE, 1770 }; 1771 1772 /* --sort dso_size */ 1773 1774 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) 1775 { 1776 int64_t size_l = map_l != NULL ? map__size(map_l) : 0; 1777 int64_t size_r = map_r != NULL ? map__size(map_r) : 0; 1778 1779 return size_l < size_r ? -1 : 1780 size_l == size_r ? 0 : 1; 1781 } 1782 1783 static int64_t 1784 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right) 1785 { 1786 return _sort__dso_size_cmp(right->ms.map, left->ms.map); 1787 } 1788 1789 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, 1790 size_t bf_size, unsigned int width) 1791 { 1792 if (map && map->dso) 1793 return repsep_snprintf(bf, bf_size, "%*d", width, 1794 map__size(map)); 1795 1796 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1797 } 1798 1799 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf, 1800 size_t size, unsigned int width) 1801 { 1802 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); 1803 } 1804 1805 struct sort_entry sort_dso_size = { 1806 .se_header = "DSO size", 1807 .se_cmp = sort__dso_size_cmp, 1808 .se_snprintf = hist_entry__dso_size_snprintf, 1809 .se_width_idx = HISTC_DSO_SIZE, 1810 }; 1811 1812 1813 struct sort_dimension { 1814 const char *name; 1815 struct sort_entry *entry; 1816 int taken; 1817 }; 1818 1819 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1820 1821 static struct sort_dimension common_sort_dimensions[] = { 1822 DIM(SORT_PID, "pid", sort_thread), 1823 DIM(SORT_COMM, "comm", sort_comm), 1824 DIM(SORT_DSO, "dso", sort_dso), 1825 DIM(SORT_SYM, "symbol", sort_sym), 1826 DIM(SORT_PARENT, "parent", sort_parent), 1827 DIM(SORT_CPU, "cpu", sort_cpu), 1828 DIM(SORT_SOCKET, "socket", sort_socket), 1829 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1830 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1831 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1832 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1833 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1834 DIM(SORT_TRACE, "trace", sort_trace), 1835 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1836 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1837 DIM(SORT_CGROUP, "cgroup", sort_cgroup), 1838 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1839 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 1840 DIM(SORT_TIME, "time", sort_time), 1841 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size), 1842 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat), 1843 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat), 1844 }; 1845 1846 #undef DIM 1847 1848 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1849 1850 static struct sort_dimension bstack_sort_dimensions[] = { 1851 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1852 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1853 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1854 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1855 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1856 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1857 DIM(SORT_ABORT, "abort", sort_abort), 1858 DIM(SORT_CYCLES, "cycles", sort_cycles), 1859 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1860 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1861 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), 1862 }; 1863 1864 #undef DIM 1865 1866 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1867 1868 static struct sort_dimension memory_sort_dimensions[] = { 1869 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1870 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1871 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1872 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1873 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1874 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1875 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1876 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1877 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1878 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size), 1879 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked), 1880 }; 1881 1882 #undef DIM 1883 1884 struct hpp_dimension { 1885 const char *name; 1886 struct perf_hpp_fmt *fmt; 1887 int taken; 1888 }; 1889 1890 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1891 1892 static struct hpp_dimension hpp_sort_dimensions[] = { 1893 DIM(PERF_HPP__OVERHEAD, "overhead"), 1894 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1895 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1896 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1897 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1898 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1899 DIM(PERF_HPP__SAMPLES, "sample"), 1900 DIM(PERF_HPP__PERIOD, "period"), 1901 }; 1902 1903 #undef DIM 1904 1905 struct hpp_sort_entry { 1906 struct perf_hpp_fmt hpp; 1907 struct sort_entry *se; 1908 }; 1909 1910 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1911 { 1912 struct hpp_sort_entry *hse; 1913 1914 if (!perf_hpp__is_sort_entry(fmt)) 1915 return; 1916 1917 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1918 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1919 } 1920 1921 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1922 struct hists *hists, int line __maybe_unused, 1923 int *span __maybe_unused) 1924 { 1925 struct hpp_sort_entry *hse; 1926 size_t len = fmt->user_len; 1927 1928 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1929 1930 if (!len) 1931 len = hists__col_len(hists, hse->se->se_width_idx); 1932 1933 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1934 } 1935 1936 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1937 struct perf_hpp *hpp __maybe_unused, 1938 struct hists *hists) 1939 { 1940 struct hpp_sort_entry *hse; 1941 size_t len = fmt->user_len; 1942 1943 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1944 1945 if (!len) 1946 len = hists__col_len(hists, hse->se->se_width_idx); 1947 1948 return len; 1949 } 1950 1951 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1952 struct hist_entry *he) 1953 { 1954 struct hpp_sort_entry *hse; 1955 size_t len = fmt->user_len; 1956 1957 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1958 1959 if (!len) 1960 len = hists__col_len(he->hists, hse->se->se_width_idx); 1961 1962 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1963 } 1964 1965 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1966 struct hist_entry *a, struct hist_entry *b) 1967 { 1968 struct hpp_sort_entry *hse; 1969 1970 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1971 return hse->se->se_cmp(a, b); 1972 } 1973 1974 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1975 struct hist_entry *a, struct hist_entry *b) 1976 { 1977 struct hpp_sort_entry *hse; 1978 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1979 1980 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1981 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1982 return collapse_fn(a, b); 1983 } 1984 1985 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1986 struct hist_entry *a, struct hist_entry *b) 1987 { 1988 struct hpp_sort_entry *hse; 1989 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1990 1991 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1992 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1993 return sort_fn(a, b); 1994 } 1995 1996 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1997 { 1998 return format->header == __sort__hpp_header; 1999 } 2000 2001 #define MK_SORT_ENTRY_CHK(key) \ 2002 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 2003 { \ 2004 struct hpp_sort_entry *hse; \ 2005 \ 2006 if (!perf_hpp__is_sort_entry(fmt)) \ 2007 return false; \ 2008 \ 2009 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 2010 return hse->se == &sort_ ## key ; \ 2011 } 2012 2013 MK_SORT_ENTRY_CHK(trace) 2014 MK_SORT_ENTRY_CHK(srcline) 2015 MK_SORT_ENTRY_CHK(srcfile) 2016 MK_SORT_ENTRY_CHK(thread) 2017 MK_SORT_ENTRY_CHK(comm) 2018 MK_SORT_ENTRY_CHK(dso) 2019 MK_SORT_ENTRY_CHK(sym) 2020 2021 2022 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2023 { 2024 struct hpp_sort_entry *hse_a; 2025 struct hpp_sort_entry *hse_b; 2026 2027 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 2028 return false; 2029 2030 hse_a = container_of(a, struct hpp_sort_entry, hpp); 2031 hse_b = container_of(b, struct hpp_sort_entry, hpp); 2032 2033 return hse_a->se == hse_b->se; 2034 } 2035 2036 static void hse_free(struct perf_hpp_fmt *fmt) 2037 { 2038 struct hpp_sort_entry *hse; 2039 2040 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2041 free(hse); 2042 } 2043 2044 static struct hpp_sort_entry * 2045 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 2046 { 2047 struct hpp_sort_entry *hse; 2048 2049 hse = malloc(sizeof(*hse)); 2050 if (hse == NULL) { 2051 pr_err("Memory allocation failed\n"); 2052 return NULL; 2053 } 2054 2055 hse->se = sd->entry; 2056 hse->hpp.name = sd->entry->se_header; 2057 hse->hpp.header = __sort__hpp_header; 2058 hse->hpp.width = __sort__hpp_width; 2059 hse->hpp.entry = __sort__hpp_entry; 2060 hse->hpp.color = NULL; 2061 2062 hse->hpp.cmp = __sort__hpp_cmp; 2063 hse->hpp.collapse = __sort__hpp_collapse; 2064 hse->hpp.sort = __sort__hpp_sort; 2065 hse->hpp.equal = __sort__hpp_equal; 2066 hse->hpp.free = hse_free; 2067 2068 INIT_LIST_HEAD(&hse->hpp.list); 2069 INIT_LIST_HEAD(&hse->hpp.sort_list); 2070 hse->hpp.elide = false; 2071 hse->hpp.len = 0; 2072 hse->hpp.user_len = 0; 2073 hse->hpp.level = level; 2074 2075 return hse; 2076 } 2077 2078 static void hpp_free(struct perf_hpp_fmt *fmt) 2079 { 2080 free(fmt); 2081 } 2082 2083 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 2084 int level) 2085 { 2086 struct perf_hpp_fmt *fmt; 2087 2088 fmt = memdup(hd->fmt, sizeof(*fmt)); 2089 if (fmt) { 2090 INIT_LIST_HEAD(&fmt->list); 2091 INIT_LIST_HEAD(&fmt->sort_list); 2092 fmt->free = hpp_free; 2093 fmt->level = level; 2094 } 2095 2096 return fmt; 2097 } 2098 2099 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 2100 { 2101 struct perf_hpp_fmt *fmt; 2102 struct hpp_sort_entry *hse; 2103 int ret = -1; 2104 int r; 2105 2106 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 2107 if (!perf_hpp__is_sort_entry(fmt)) 2108 continue; 2109 2110 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2111 if (hse->se->se_filter == NULL) 2112 continue; 2113 2114 /* 2115 * hist entry is filtered if any of sort key in the hpp list 2116 * is applied. But it should skip non-matched filter types. 2117 */ 2118 r = hse->se->se_filter(he, type, arg); 2119 if (r >= 0) { 2120 if (ret < 0) 2121 ret = 0; 2122 ret |= r; 2123 } 2124 } 2125 2126 return ret; 2127 } 2128 2129 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 2130 struct perf_hpp_list *list, 2131 int level) 2132 { 2133 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 2134 2135 if (hse == NULL) 2136 return -1; 2137 2138 perf_hpp_list__register_sort_field(list, &hse->hpp); 2139 return 0; 2140 } 2141 2142 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 2143 struct perf_hpp_list *list) 2144 { 2145 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 2146 2147 if (hse == NULL) 2148 return -1; 2149 2150 perf_hpp_list__column_register(list, &hse->hpp); 2151 return 0; 2152 } 2153 2154 struct hpp_dynamic_entry { 2155 struct perf_hpp_fmt hpp; 2156 struct evsel *evsel; 2157 struct tep_format_field *field; 2158 unsigned dynamic_len; 2159 bool raw_trace; 2160 }; 2161 2162 static int hde_width(struct hpp_dynamic_entry *hde) 2163 { 2164 if (!hde->hpp.len) { 2165 int len = hde->dynamic_len; 2166 int namelen = strlen(hde->field->name); 2167 int fieldlen = hde->field->size; 2168 2169 if (namelen > len) 2170 len = namelen; 2171 2172 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) { 2173 /* length for print hex numbers */ 2174 fieldlen = hde->field->size * 2 + 2; 2175 } 2176 if (fieldlen > len) 2177 len = fieldlen; 2178 2179 hde->hpp.len = len; 2180 } 2181 return hde->hpp.len; 2182 } 2183 2184 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 2185 struct hist_entry *he) 2186 { 2187 char *str, *pos; 2188 struct tep_format_field *field = hde->field; 2189 size_t namelen; 2190 bool last = false; 2191 2192 if (hde->raw_trace) 2193 return; 2194 2195 /* parse pretty print result and update max length */ 2196 if (!he->trace_output) 2197 he->trace_output = get_trace_output(he); 2198 2199 namelen = strlen(field->name); 2200 str = he->trace_output; 2201 2202 while (str) { 2203 pos = strchr(str, ' '); 2204 if (pos == NULL) { 2205 last = true; 2206 pos = str + strlen(str); 2207 } 2208 2209 if (!strncmp(str, field->name, namelen)) { 2210 size_t len; 2211 2212 str += namelen + 1; 2213 len = pos - str; 2214 2215 if (len > hde->dynamic_len) 2216 hde->dynamic_len = len; 2217 break; 2218 } 2219 2220 if (last) 2221 str = NULL; 2222 else 2223 str = pos + 1; 2224 } 2225 } 2226 2227 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2228 struct hists *hists __maybe_unused, 2229 int line __maybe_unused, 2230 int *span __maybe_unused) 2231 { 2232 struct hpp_dynamic_entry *hde; 2233 size_t len = fmt->user_len; 2234 2235 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2236 2237 if (!len) 2238 len = hde_width(hde); 2239 2240 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 2241 } 2242 2243 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 2244 struct perf_hpp *hpp __maybe_unused, 2245 struct hists *hists __maybe_unused) 2246 { 2247 struct hpp_dynamic_entry *hde; 2248 size_t len = fmt->user_len; 2249 2250 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2251 2252 if (!len) 2253 len = hde_width(hde); 2254 2255 return len; 2256 } 2257 2258 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 2259 { 2260 struct hpp_dynamic_entry *hde; 2261 2262 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2263 2264 return hists_to_evsel(hists) == hde->evsel; 2265 } 2266 2267 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 2268 struct hist_entry *he) 2269 { 2270 struct hpp_dynamic_entry *hde; 2271 size_t len = fmt->user_len; 2272 char *str, *pos; 2273 struct tep_format_field *field; 2274 size_t namelen; 2275 bool last = false; 2276 int ret; 2277 2278 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2279 2280 if (!len) 2281 len = hde_width(hde); 2282 2283 if (hde->raw_trace) 2284 goto raw_field; 2285 2286 if (!he->trace_output) 2287 he->trace_output = get_trace_output(he); 2288 2289 field = hde->field; 2290 namelen = strlen(field->name); 2291 str = he->trace_output; 2292 2293 while (str) { 2294 pos = strchr(str, ' '); 2295 if (pos == NULL) { 2296 last = true; 2297 pos = str + strlen(str); 2298 } 2299 2300 if (!strncmp(str, field->name, namelen)) { 2301 str += namelen + 1; 2302 str = strndup(str, pos - str); 2303 2304 if (str == NULL) 2305 return scnprintf(hpp->buf, hpp->size, 2306 "%*.*s", len, len, "ERROR"); 2307 break; 2308 } 2309 2310 if (last) 2311 str = NULL; 2312 else 2313 str = pos + 1; 2314 } 2315 2316 if (str == NULL) { 2317 struct trace_seq seq; 2318 raw_field: 2319 trace_seq_init(&seq); 2320 tep_print_field(&seq, he->raw_data, hde->field); 2321 str = seq.buffer; 2322 } 2323 2324 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2325 free(str); 2326 return ret; 2327 } 2328 2329 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2330 struct hist_entry *a, struct hist_entry *b) 2331 { 2332 struct hpp_dynamic_entry *hde; 2333 struct tep_format_field *field; 2334 unsigned offset, size; 2335 2336 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2337 2338 if (b == NULL) { 2339 update_dynamic_len(hde, a); 2340 return 0; 2341 } 2342 2343 field = hde->field; 2344 if (field->flags & TEP_FIELD_IS_DYNAMIC) { 2345 unsigned long long dyn; 2346 2347 tep_read_number_field(field, a->raw_data, &dyn); 2348 offset = dyn & 0xffff; 2349 size = (dyn >> 16) & 0xffff; 2350 2351 /* record max width for output */ 2352 if (size > hde->dynamic_len) 2353 hde->dynamic_len = size; 2354 } else { 2355 offset = field->offset; 2356 size = field->size; 2357 } 2358 2359 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2360 } 2361 2362 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2363 { 2364 return fmt->cmp == __sort__hde_cmp; 2365 } 2366 2367 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2368 { 2369 struct hpp_dynamic_entry *hde_a; 2370 struct hpp_dynamic_entry *hde_b; 2371 2372 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2373 return false; 2374 2375 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2376 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2377 2378 return hde_a->field == hde_b->field; 2379 } 2380 2381 static void hde_free(struct perf_hpp_fmt *fmt) 2382 { 2383 struct hpp_dynamic_entry *hde; 2384 2385 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2386 free(hde); 2387 } 2388 2389 static struct hpp_dynamic_entry * 2390 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field, 2391 int level) 2392 { 2393 struct hpp_dynamic_entry *hde; 2394 2395 hde = malloc(sizeof(*hde)); 2396 if (hde == NULL) { 2397 pr_debug("Memory allocation failed\n"); 2398 return NULL; 2399 } 2400 2401 hde->evsel = evsel; 2402 hde->field = field; 2403 hde->dynamic_len = 0; 2404 2405 hde->hpp.name = field->name; 2406 hde->hpp.header = __sort__hde_header; 2407 hde->hpp.width = __sort__hde_width; 2408 hde->hpp.entry = __sort__hde_entry; 2409 hde->hpp.color = NULL; 2410 2411 hde->hpp.cmp = __sort__hde_cmp; 2412 hde->hpp.collapse = __sort__hde_cmp; 2413 hde->hpp.sort = __sort__hde_cmp; 2414 hde->hpp.equal = __sort__hde_equal; 2415 hde->hpp.free = hde_free; 2416 2417 INIT_LIST_HEAD(&hde->hpp.list); 2418 INIT_LIST_HEAD(&hde->hpp.sort_list); 2419 hde->hpp.elide = false; 2420 hde->hpp.len = 0; 2421 hde->hpp.user_len = 0; 2422 hde->hpp.level = level; 2423 2424 return hde; 2425 } 2426 2427 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2428 { 2429 struct perf_hpp_fmt *new_fmt = NULL; 2430 2431 if (perf_hpp__is_sort_entry(fmt)) { 2432 struct hpp_sort_entry *hse, *new_hse; 2433 2434 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2435 new_hse = memdup(hse, sizeof(*hse)); 2436 if (new_hse) 2437 new_fmt = &new_hse->hpp; 2438 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2439 struct hpp_dynamic_entry *hde, *new_hde; 2440 2441 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2442 new_hde = memdup(hde, sizeof(*hde)); 2443 if (new_hde) 2444 new_fmt = &new_hde->hpp; 2445 } else { 2446 new_fmt = memdup(fmt, sizeof(*fmt)); 2447 } 2448 2449 INIT_LIST_HEAD(&new_fmt->list); 2450 INIT_LIST_HEAD(&new_fmt->sort_list); 2451 2452 return new_fmt; 2453 } 2454 2455 static int parse_field_name(char *str, char **event, char **field, char **opt) 2456 { 2457 char *event_name, *field_name, *opt_name; 2458 2459 event_name = str; 2460 field_name = strchr(str, '.'); 2461 2462 if (field_name) { 2463 *field_name++ = '\0'; 2464 } else { 2465 event_name = NULL; 2466 field_name = str; 2467 } 2468 2469 opt_name = strchr(field_name, '/'); 2470 if (opt_name) 2471 *opt_name++ = '\0'; 2472 2473 *event = event_name; 2474 *field = field_name; 2475 *opt = opt_name; 2476 2477 return 0; 2478 } 2479 2480 /* find match evsel using a given event name. The event name can be: 2481 * 1. '%' + event index (e.g. '%1' for first event) 2482 * 2. full event name (e.g. sched:sched_switch) 2483 * 3. partial event name (should not contain ':') 2484 */ 2485 static struct evsel *find_evsel(struct evlist *evlist, char *event_name) 2486 { 2487 struct evsel *evsel = NULL; 2488 struct evsel *pos; 2489 bool full_name; 2490 2491 /* case 1 */ 2492 if (event_name[0] == '%') { 2493 int nr = strtol(event_name+1, NULL, 0); 2494 2495 if (nr > evlist->core.nr_entries) 2496 return NULL; 2497 2498 evsel = evlist__first(evlist); 2499 while (--nr > 0) 2500 evsel = evsel__next(evsel); 2501 2502 return evsel; 2503 } 2504 2505 full_name = !!strchr(event_name, ':'); 2506 evlist__for_each_entry(evlist, pos) { 2507 /* case 2 */ 2508 if (full_name && !strcmp(pos->name, event_name)) 2509 return pos; 2510 /* case 3 */ 2511 if (!full_name && strstr(pos->name, event_name)) { 2512 if (evsel) { 2513 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2514 event_name, evsel->name, pos->name); 2515 return NULL; 2516 } 2517 evsel = pos; 2518 } 2519 } 2520 2521 return evsel; 2522 } 2523 2524 static int __dynamic_dimension__add(struct evsel *evsel, 2525 struct tep_format_field *field, 2526 bool raw_trace, int level) 2527 { 2528 struct hpp_dynamic_entry *hde; 2529 2530 hde = __alloc_dynamic_entry(evsel, field, level); 2531 if (hde == NULL) 2532 return -ENOMEM; 2533 2534 hde->raw_trace = raw_trace; 2535 2536 perf_hpp__register_sort_field(&hde->hpp); 2537 return 0; 2538 } 2539 2540 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level) 2541 { 2542 int ret; 2543 struct tep_format_field *field; 2544 2545 field = evsel->tp_format->format.fields; 2546 while (field) { 2547 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2548 if (ret < 0) 2549 return ret; 2550 2551 field = field->next; 2552 } 2553 return 0; 2554 } 2555 2556 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace, 2557 int level) 2558 { 2559 int ret; 2560 struct evsel *evsel; 2561 2562 evlist__for_each_entry(evlist, evsel) { 2563 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2564 continue; 2565 2566 ret = add_evsel_fields(evsel, raw_trace, level); 2567 if (ret < 0) 2568 return ret; 2569 } 2570 return 0; 2571 } 2572 2573 static int add_all_matching_fields(struct evlist *evlist, 2574 char *field_name, bool raw_trace, int level) 2575 { 2576 int ret = -ESRCH; 2577 struct evsel *evsel; 2578 struct tep_format_field *field; 2579 2580 evlist__for_each_entry(evlist, evsel) { 2581 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 2582 continue; 2583 2584 field = tep_find_any_field(evsel->tp_format, field_name); 2585 if (field == NULL) 2586 continue; 2587 2588 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2589 if (ret < 0) 2590 break; 2591 } 2592 return ret; 2593 } 2594 2595 static int add_dynamic_entry(struct evlist *evlist, const char *tok, 2596 int level) 2597 { 2598 char *str, *event_name, *field_name, *opt_name; 2599 struct evsel *evsel; 2600 struct tep_format_field *field; 2601 bool raw_trace = symbol_conf.raw_trace; 2602 int ret = 0; 2603 2604 if (evlist == NULL) 2605 return -ENOENT; 2606 2607 str = strdup(tok); 2608 if (str == NULL) 2609 return -ENOMEM; 2610 2611 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 2612 ret = -EINVAL; 2613 goto out; 2614 } 2615 2616 if (opt_name) { 2617 if (strcmp(opt_name, "raw")) { 2618 pr_debug("unsupported field option %s\n", opt_name); 2619 ret = -EINVAL; 2620 goto out; 2621 } 2622 raw_trace = true; 2623 } 2624 2625 if (!strcmp(field_name, "trace_fields")) { 2626 ret = add_all_dynamic_fields(evlist, raw_trace, level); 2627 goto out; 2628 } 2629 2630 if (event_name == NULL) { 2631 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2632 goto out; 2633 } 2634 2635 evsel = find_evsel(evlist, event_name); 2636 if (evsel == NULL) { 2637 pr_debug("Cannot find event: %s\n", event_name); 2638 ret = -ENOENT; 2639 goto out; 2640 } 2641 2642 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2643 pr_debug("%s is not a tracepoint event\n", event_name); 2644 ret = -EINVAL; 2645 goto out; 2646 } 2647 2648 if (!strcmp(field_name, "*")) { 2649 ret = add_evsel_fields(evsel, raw_trace, level); 2650 } else { 2651 field = tep_find_any_field(evsel->tp_format, field_name); 2652 if (field == NULL) { 2653 pr_debug("Cannot find event field for %s.%s\n", 2654 event_name, field_name); 2655 return -ENOENT; 2656 } 2657 2658 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2659 } 2660 2661 out: 2662 free(str); 2663 return ret; 2664 } 2665 2666 static int __sort_dimension__add(struct sort_dimension *sd, 2667 struct perf_hpp_list *list, 2668 int level) 2669 { 2670 if (sd->taken) 2671 return 0; 2672 2673 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2674 return -1; 2675 2676 if (sd->entry->se_collapse) 2677 list->need_collapse = 1; 2678 2679 sd->taken = 1; 2680 2681 return 0; 2682 } 2683 2684 static int __hpp_dimension__add(struct hpp_dimension *hd, 2685 struct perf_hpp_list *list, 2686 int level) 2687 { 2688 struct perf_hpp_fmt *fmt; 2689 2690 if (hd->taken) 2691 return 0; 2692 2693 fmt = __hpp_dimension__alloc_hpp(hd, level); 2694 if (!fmt) 2695 return -1; 2696 2697 hd->taken = 1; 2698 perf_hpp_list__register_sort_field(list, fmt); 2699 return 0; 2700 } 2701 2702 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2703 struct sort_dimension *sd) 2704 { 2705 if (sd->taken) 2706 return 0; 2707 2708 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2709 return -1; 2710 2711 sd->taken = 1; 2712 return 0; 2713 } 2714 2715 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2716 struct hpp_dimension *hd) 2717 { 2718 struct perf_hpp_fmt *fmt; 2719 2720 if (hd->taken) 2721 return 0; 2722 2723 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2724 if (!fmt) 2725 return -1; 2726 2727 hd->taken = 1; 2728 perf_hpp_list__column_register(list, fmt); 2729 return 0; 2730 } 2731 2732 int hpp_dimension__add_output(unsigned col) 2733 { 2734 BUG_ON(col >= PERF_HPP__MAX_INDEX); 2735 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 2736 } 2737 2738 int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 2739 struct evlist *evlist, 2740 int level) 2741 { 2742 unsigned int i; 2743 2744 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 2745 struct sort_dimension *sd = &common_sort_dimensions[i]; 2746 2747 if (strncasecmp(tok, sd->name, strlen(tok))) 2748 continue; 2749 2750 if (sd->entry == &sort_parent) { 2751 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 2752 if (ret) { 2753 char err[BUFSIZ]; 2754 2755 regerror(ret, &parent_regex, err, sizeof(err)); 2756 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 2757 return -EINVAL; 2758 } 2759 list->parent = 1; 2760 } else if (sd->entry == &sort_sym) { 2761 list->sym = 1; 2762 /* 2763 * perf diff displays the performance difference amongst 2764 * two or more perf.data files. Those files could come 2765 * from different binaries. So we should not compare 2766 * their ips, but the name of symbol. 2767 */ 2768 if (sort__mode == SORT_MODE__DIFF) 2769 sd->entry->se_collapse = sort__sym_sort; 2770 2771 } else if (sd->entry == &sort_dso) { 2772 list->dso = 1; 2773 } else if (sd->entry == &sort_socket) { 2774 list->socket = 1; 2775 } else if (sd->entry == &sort_thread) { 2776 list->thread = 1; 2777 } else if (sd->entry == &sort_comm) { 2778 list->comm = 1; 2779 } 2780 2781 return __sort_dimension__add(sd, list, level); 2782 } 2783 2784 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 2785 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 2786 2787 if (strncasecmp(tok, hd->name, strlen(tok))) 2788 continue; 2789 2790 return __hpp_dimension__add(hd, list, level); 2791 } 2792 2793 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 2794 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 2795 2796 if (strncasecmp(tok, sd->name, strlen(tok))) 2797 continue; 2798 2799 if (sort__mode != SORT_MODE__BRANCH) 2800 return -EINVAL; 2801 2802 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to) 2803 list->sym = 1; 2804 2805 __sort_dimension__add(sd, list, level); 2806 return 0; 2807 } 2808 2809 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 2810 struct sort_dimension *sd = &memory_sort_dimensions[i]; 2811 2812 if (strncasecmp(tok, sd->name, strlen(tok))) 2813 continue; 2814 2815 if (sort__mode != SORT_MODE__MEMORY) 2816 return -EINVAL; 2817 2818 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0) 2819 return -EINVAL; 2820 2821 if (sd->entry == &sort_mem_daddr_sym) 2822 list->sym = 1; 2823 2824 __sort_dimension__add(sd, list, level); 2825 return 0; 2826 } 2827 2828 if (!add_dynamic_entry(evlist, tok, level)) 2829 return 0; 2830 2831 return -ESRCH; 2832 } 2833 2834 static int setup_sort_list(struct perf_hpp_list *list, char *str, 2835 struct evlist *evlist) 2836 { 2837 char *tmp, *tok; 2838 int ret = 0; 2839 int level = 0; 2840 int next_level = 1; 2841 bool in_group = false; 2842 2843 do { 2844 tok = str; 2845 tmp = strpbrk(str, "{}, "); 2846 if (tmp) { 2847 if (in_group) 2848 next_level = level; 2849 else 2850 next_level = level + 1; 2851 2852 if (*tmp == '{') 2853 in_group = true; 2854 else if (*tmp == '}') 2855 in_group = false; 2856 2857 *tmp = '\0'; 2858 str = tmp + 1; 2859 } 2860 2861 if (*tok) { 2862 ret = sort_dimension__add(list, tok, evlist, level); 2863 if (ret == -EINVAL) { 2864 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok))) 2865 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 2866 else 2867 ui__error("Invalid --sort key: `%s'", tok); 2868 break; 2869 } else if (ret == -ESRCH) { 2870 ui__error("Unknown --sort key: `%s'", tok); 2871 break; 2872 } 2873 } 2874 2875 level = next_level; 2876 } while (tmp); 2877 2878 return ret; 2879 } 2880 2881 static const char *get_default_sort_order(struct evlist *evlist) 2882 { 2883 const char *default_sort_orders[] = { 2884 default_sort_order, 2885 default_branch_sort_order, 2886 default_mem_sort_order, 2887 default_top_sort_order, 2888 default_diff_sort_order, 2889 default_tracepoint_sort_order, 2890 }; 2891 bool use_trace = true; 2892 struct evsel *evsel; 2893 2894 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 2895 2896 if (evlist == NULL || evlist__empty(evlist)) 2897 goto out_no_evlist; 2898 2899 evlist__for_each_entry(evlist, evsel) { 2900 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) { 2901 use_trace = false; 2902 break; 2903 } 2904 } 2905 2906 if (use_trace) { 2907 sort__mode = SORT_MODE__TRACEPOINT; 2908 if (symbol_conf.raw_trace) 2909 return "trace_fields"; 2910 } 2911 out_no_evlist: 2912 return default_sort_orders[sort__mode]; 2913 } 2914 2915 static int setup_sort_order(struct evlist *evlist) 2916 { 2917 char *new_sort_order; 2918 2919 /* 2920 * Append '+'-prefixed sort order to the default sort 2921 * order string. 2922 */ 2923 if (!sort_order || is_strict_order(sort_order)) 2924 return 0; 2925 2926 if (sort_order[1] == '\0') { 2927 ui__error("Invalid --sort key: `+'"); 2928 return -EINVAL; 2929 } 2930 2931 /* 2932 * We allocate new sort_order string, but we never free it, 2933 * because it's checked over the rest of the code. 2934 */ 2935 if (asprintf(&new_sort_order, "%s,%s", 2936 get_default_sort_order(evlist), sort_order + 1) < 0) { 2937 pr_err("Not enough memory to set up --sort"); 2938 return -ENOMEM; 2939 } 2940 2941 sort_order = new_sort_order; 2942 return 0; 2943 } 2944 2945 /* 2946 * Adds 'pre,' prefix into 'str' is 'pre' is 2947 * not already part of 'str'. 2948 */ 2949 static char *prefix_if_not_in(const char *pre, char *str) 2950 { 2951 char *n; 2952 2953 if (!str || strstr(str, pre)) 2954 return str; 2955 2956 if (asprintf(&n, "%s,%s", pre, str) < 0) 2957 n = NULL; 2958 2959 free(str); 2960 return n; 2961 } 2962 2963 static char *setup_overhead(char *keys) 2964 { 2965 if (sort__mode == SORT_MODE__DIFF) 2966 return keys; 2967 2968 keys = prefix_if_not_in("overhead", keys); 2969 2970 if (symbol_conf.cumulate_callchain) 2971 keys = prefix_if_not_in("overhead_children", keys); 2972 2973 return keys; 2974 } 2975 2976 static int __setup_sorting(struct evlist *evlist) 2977 { 2978 char *str; 2979 const char *sort_keys; 2980 int ret = 0; 2981 2982 ret = setup_sort_order(evlist); 2983 if (ret) 2984 return ret; 2985 2986 sort_keys = sort_order; 2987 if (sort_keys == NULL) { 2988 if (is_strict_order(field_order)) { 2989 /* 2990 * If user specified field order but no sort order, 2991 * we'll honor it and not add default sort orders. 2992 */ 2993 return 0; 2994 } 2995 2996 sort_keys = get_default_sort_order(evlist); 2997 } 2998 2999 str = strdup(sort_keys); 3000 if (str == NULL) { 3001 pr_err("Not enough memory to setup sort keys"); 3002 return -ENOMEM; 3003 } 3004 3005 /* 3006 * Prepend overhead fields for backward compatibility. 3007 */ 3008 if (!is_strict_order(field_order)) { 3009 str = setup_overhead(str); 3010 if (str == NULL) { 3011 pr_err("Not enough memory to setup overhead keys"); 3012 return -ENOMEM; 3013 } 3014 } 3015 3016 ret = setup_sort_list(&perf_hpp_list, str, evlist); 3017 3018 free(str); 3019 return ret; 3020 } 3021 3022 void perf_hpp__set_elide(int idx, bool elide) 3023 { 3024 struct perf_hpp_fmt *fmt; 3025 struct hpp_sort_entry *hse; 3026 3027 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3028 if (!perf_hpp__is_sort_entry(fmt)) 3029 continue; 3030 3031 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3032 if (hse->se->se_width_idx == idx) { 3033 fmt->elide = elide; 3034 break; 3035 } 3036 } 3037 } 3038 3039 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp) 3040 { 3041 if (list && strlist__nr_entries(list) == 1) { 3042 if (fp != NULL) 3043 fprintf(fp, "# %s: %s\n", list_name, 3044 strlist__entry(list, 0)->s); 3045 return true; 3046 } 3047 return false; 3048 } 3049 3050 static bool get_elide(int idx, FILE *output) 3051 { 3052 switch (idx) { 3053 case HISTC_SYMBOL: 3054 return __get_elide(symbol_conf.sym_list, "symbol", output); 3055 case HISTC_DSO: 3056 return __get_elide(symbol_conf.dso_list, "dso", output); 3057 case HISTC_COMM: 3058 return __get_elide(symbol_conf.comm_list, "comm", output); 3059 default: 3060 break; 3061 } 3062 3063 if (sort__mode != SORT_MODE__BRANCH) 3064 return false; 3065 3066 switch (idx) { 3067 case HISTC_SYMBOL_FROM: 3068 return __get_elide(symbol_conf.sym_from_list, "sym_from", output); 3069 case HISTC_SYMBOL_TO: 3070 return __get_elide(symbol_conf.sym_to_list, "sym_to", output); 3071 case HISTC_DSO_FROM: 3072 return __get_elide(symbol_conf.dso_from_list, "dso_from", output); 3073 case HISTC_DSO_TO: 3074 return __get_elide(symbol_conf.dso_to_list, "dso_to", output); 3075 default: 3076 break; 3077 } 3078 3079 return false; 3080 } 3081 3082 void sort__setup_elide(FILE *output) 3083 { 3084 struct perf_hpp_fmt *fmt; 3085 struct hpp_sort_entry *hse; 3086 3087 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3088 if (!perf_hpp__is_sort_entry(fmt)) 3089 continue; 3090 3091 hse = container_of(fmt, struct hpp_sort_entry, hpp); 3092 fmt->elide = get_elide(hse->se->se_width_idx, output); 3093 } 3094 3095 /* 3096 * It makes no sense to elide all of sort entries. 3097 * Just revert them to show up again. 3098 */ 3099 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3100 if (!perf_hpp__is_sort_entry(fmt)) 3101 continue; 3102 3103 if (!fmt->elide) 3104 return; 3105 } 3106 3107 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 3108 if (!perf_hpp__is_sort_entry(fmt)) 3109 continue; 3110 3111 fmt->elide = false; 3112 } 3113 } 3114 3115 int output_field_add(struct perf_hpp_list *list, char *tok) 3116 { 3117 unsigned int i; 3118 3119 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 3120 struct sort_dimension *sd = &common_sort_dimensions[i]; 3121 3122 if (strncasecmp(tok, sd->name, strlen(tok))) 3123 continue; 3124 3125 return __sort_dimension__add_output(list, sd); 3126 } 3127 3128 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) { 3129 struct hpp_dimension *hd = &hpp_sort_dimensions[i]; 3130 3131 if (strncasecmp(tok, hd->name, strlen(tok))) 3132 continue; 3133 3134 return __hpp_dimension__add_output(list, hd); 3135 } 3136 3137 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) { 3138 struct sort_dimension *sd = &bstack_sort_dimensions[i]; 3139 3140 if (strncasecmp(tok, sd->name, strlen(tok))) 3141 continue; 3142 3143 if (sort__mode != SORT_MODE__BRANCH) 3144 return -EINVAL; 3145 3146 return __sort_dimension__add_output(list, sd); 3147 } 3148 3149 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) { 3150 struct sort_dimension *sd = &memory_sort_dimensions[i]; 3151 3152 if (strncasecmp(tok, sd->name, strlen(tok))) 3153 continue; 3154 3155 if (sort__mode != SORT_MODE__MEMORY) 3156 return -EINVAL; 3157 3158 return __sort_dimension__add_output(list, sd); 3159 } 3160 3161 return -ESRCH; 3162 } 3163 3164 static int setup_output_list(struct perf_hpp_list *list, char *str) 3165 { 3166 char *tmp, *tok; 3167 int ret = 0; 3168 3169 for (tok = strtok_r(str, ", ", &tmp); 3170 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3171 ret = output_field_add(list, tok); 3172 if (ret == -EINVAL) { 3173 ui__error("Invalid --fields key: `%s'", tok); 3174 break; 3175 } else if (ret == -ESRCH) { 3176 ui__error("Unknown --fields key: `%s'", tok); 3177 break; 3178 } 3179 } 3180 3181 return ret; 3182 } 3183 3184 void reset_dimensions(void) 3185 { 3186 unsigned int i; 3187 3188 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) 3189 common_sort_dimensions[i].taken = 0; 3190 3191 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) 3192 hpp_sort_dimensions[i].taken = 0; 3193 3194 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) 3195 bstack_sort_dimensions[i].taken = 0; 3196 3197 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) 3198 memory_sort_dimensions[i].taken = 0; 3199 } 3200 3201 bool is_strict_order(const char *order) 3202 { 3203 return order && (*order != '+'); 3204 } 3205 3206 static int __setup_output_field(void) 3207 { 3208 char *str, *strp; 3209 int ret = -EINVAL; 3210 3211 if (field_order == NULL) 3212 return 0; 3213 3214 strp = str = strdup(field_order); 3215 if (str == NULL) { 3216 pr_err("Not enough memory to setup output fields"); 3217 return -ENOMEM; 3218 } 3219 3220 if (!is_strict_order(field_order)) 3221 strp++; 3222 3223 if (!strlen(strp)) { 3224 ui__error("Invalid --fields key: `+'"); 3225 goto out; 3226 } 3227 3228 ret = setup_output_list(&perf_hpp_list, strp); 3229 3230 out: 3231 free(str); 3232 return ret; 3233 } 3234 3235 int setup_sorting(struct evlist *evlist) 3236 { 3237 int err; 3238 3239 err = __setup_sorting(evlist); 3240 if (err < 0) 3241 return err; 3242 3243 if (parent_pattern != default_parent_pattern) { 3244 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1); 3245 if (err < 0) 3246 return err; 3247 } 3248 3249 reset_dimensions(); 3250 3251 /* 3252 * perf diff doesn't use default hpp output fields. 3253 */ 3254 if (sort__mode != SORT_MODE__DIFF) 3255 perf_hpp__init(); 3256 3257 err = __setup_output_field(); 3258 if (err < 0) 3259 return err; 3260 3261 /* copy sort keys to output fields */ 3262 perf_hpp__setup_output_field(&perf_hpp_list); 3263 /* and then copy output fields to sort keys */ 3264 perf_hpp__append_sort_keys(&perf_hpp_list); 3265 3266 /* setup hists-specific output fields */ 3267 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) 3268 return -1; 3269 3270 return 0; 3271 } 3272 3273 void reset_output_field(void) 3274 { 3275 perf_hpp_list.need_collapse = 0; 3276 perf_hpp_list.parent = 0; 3277 perf_hpp_list.sym = 0; 3278 perf_hpp_list.dso = 0; 3279 3280 field_order = NULL; 3281 sort_order = NULL; 3282 3283 reset_dimensions(); 3284 perf_hpp__reset_output_field(&perf_hpp_list); 3285 } 3286 3287 #define INDENT (3*8 + 1) 3288 3289 static void add_key(struct strbuf *sb, const char *str, int *llen) 3290 { 3291 if (*llen >= 75) { 3292 strbuf_addstr(sb, "\n\t\t\t "); 3293 *llen = INDENT; 3294 } 3295 strbuf_addf(sb, " %s", str); 3296 *llen += strlen(str) + 1; 3297 } 3298 3299 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n, 3300 int *llen) 3301 { 3302 int i; 3303 3304 for (i = 0; i < n; i++) 3305 add_key(sb, s[i].name, llen); 3306 } 3307 3308 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n, 3309 int *llen) 3310 { 3311 int i; 3312 3313 for (i = 0; i < n; i++) 3314 add_key(sb, s[i].name, llen); 3315 } 3316 3317 const char *sort_help(const char *prefix) 3318 { 3319 struct strbuf sb; 3320 char *s; 3321 int len = strlen(prefix) + INDENT; 3322 3323 strbuf_init(&sb, 300); 3324 strbuf_addstr(&sb, prefix); 3325 add_hpp_sort_string(&sb, hpp_sort_dimensions, 3326 ARRAY_SIZE(hpp_sort_dimensions), &len); 3327 add_sort_string(&sb, common_sort_dimensions, 3328 ARRAY_SIZE(common_sort_dimensions), &len); 3329 add_sort_string(&sb, bstack_sort_dimensions, 3330 ARRAY_SIZE(bstack_sort_dimensions), &len); 3331 add_sort_string(&sb, memory_sort_dimensions, 3332 ARRAY_SIZE(memory_sort_dimensions), &len); 3333 s = strbuf_detach(&sb, NULL); 3334 strbuf_release(&sb); 3335 return s; 3336 } 3337