// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include <linux/kernel.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of a char used with the:
 *
 * -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only invalid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct
hist_entry *left, struct hist_entry *right) 130 { 131 return strcmp(comm__str(right->comm), comm__str(left->comm)); 132 } 133 134 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 135 size_t size, unsigned int width) 136 { 137 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 138 } 139 140 struct sort_entry sort_comm = { 141 .se_header = "Command", 142 .se_cmp = sort__comm_cmp, 143 .se_collapse = sort__comm_collapse, 144 .se_sort = sort__comm_sort, 145 .se_snprintf = hist_entry__comm_snprintf, 146 .se_filter = hist_entry__thread_filter, 147 .se_width_idx = HISTC_COMM, 148 }; 149 150 /* --sort dso */ 151 152 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 153 { 154 struct dso *dso_l = map_l ? map_l->dso : NULL; 155 struct dso *dso_r = map_r ? map_r->dso : NULL; 156 const char *dso_name_l, *dso_name_r; 157 158 if (!dso_l || !dso_r) 159 return cmp_null(dso_r, dso_l); 160 161 if (verbose > 0) { 162 dso_name_l = dso_l->long_name; 163 dso_name_r = dso_r->long_name; 164 } else { 165 dso_name_l = dso_l->short_name; 166 dso_name_r = dso_r->short_name; 167 } 168 169 return strcmp(dso_name_l, dso_name_r); 170 } 171 172 static int64_t 173 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 174 { 175 return _sort__dso_cmp(right->ms.map, left->ms.map); 176 } 177 178 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 179 size_t size, unsigned int width) 180 { 181 if (map && map->dso) { 182 const char *dso_name = verbose > 0 ? map->dso->long_name : 183 map->dso->short_name; 184 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 185 } 186 187 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 188 } 189 190 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 191 size_t size, unsigned int width) 192 { 193 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 194 } 195 196 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 197 { 198 const struct dso *dso = arg; 199 200 if (type != HIST_FILTER__DSO) 201 return -1; 202 203 return dso && (!he->ms.map || he->ms.map->dso != dso); 204 } 205 206 struct sort_entry sort_dso = { 207 .se_header = "Shared Object", 208 .se_cmp = sort__dso_cmp, 209 .se_snprintf = hist_entry__dso_snprintf, 210 .se_filter = hist_entry__dso_filter, 211 .se_width_idx = HISTC_DSO, 212 }; 213 214 /* --sort symbol */ 215 216 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 217 { 218 return (int64_t)(right_ip - left_ip); 219 } 220 221 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 222 { 223 if (!sym_l || !sym_r) 224 return cmp_null(sym_l, sym_r); 225 226 if (sym_l == sym_r) 227 return 0; 228 229 if (sym_l->inlined || sym_r->inlined) 230 return strcmp(sym_l->name, sym_r->name); 231 232 if (sym_l->start != sym_r->start) 233 return (int64_t)(sym_r->start - sym_l->start); 234 235 return (int64_t)(sym_r->end - sym_l->end); 236 } 237 238 static int64_t 239 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 240 { 241 int64_t ret; 242 243 if (!left->ms.sym && !right->ms.sym) 244 return _sort__addr_cmp(left->ip, right->ip); 245 246 /* 247 * comparing symbol address alone is not enough since it's a 248 * relative address within a dso. 
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true, true, he->ip);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->from.al_addr),
					left->branch_info->from.sym,
					true, true,
					left->branch_info->from.al_addr);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->from.al_addr),
					right->branch_info->from.sym,
					true, true,
					right->branch_info->from.al_addr);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						left->branch_info->to.al_addr),
					left->branch_info->to.sym,
					true, true,
					left->branch_info->to.al_addr);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
						right->branch_info->to.al_addr),
					right->branch_info->to.sym,
					true, true,
					right->branch_info->to.al_addr);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry
*he, char *bf, 496 size_t size, unsigned int width) 497 { 498 if (!he->srcfile) 499 he->srcfile = hist_entry__get_srcfile(he); 500 501 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 502 } 503 504 struct sort_entry sort_srcfile = { 505 .se_header = "Source File", 506 .se_cmp = sort__srcfile_cmp, 507 .se_snprintf = hist_entry__srcfile_snprintf, 508 .se_width_idx = HISTC_SRCFILE, 509 }; 510 511 /* --sort parent */ 512 513 static int64_t 514 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 515 { 516 struct symbol *sym_l = left->parent; 517 struct symbol *sym_r = right->parent; 518 519 if (!sym_l || !sym_r) 520 return cmp_null(sym_l, sym_r); 521 522 return strcmp(sym_r->name, sym_l->name); 523 } 524 525 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 526 size_t size, unsigned int width) 527 { 528 return repsep_snprintf(bf, size, "%-*.*s", width, width, 529 he->parent ? he->parent->name : "[other]"); 530 } 531 532 struct sort_entry sort_parent = { 533 .se_header = "Parent symbol", 534 .se_cmp = sort__parent_cmp, 535 .se_snprintf = hist_entry__parent_snprintf, 536 .se_width_idx = HISTC_PARENT, 537 }; 538 539 /* --sort cpu */ 540 541 static int64_t 542 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 543 { 544 return right->cpu - left->cpu; 545 } 546 547 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 548 size_t size, unsigned int width) 549 { 550 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 551 } 552 553 struct sort_entry sort_cpu = { 554 .se_header = "CPU", 555 .se_cmp = sort__cpu_cmp, 556 .se_snprintf = hist_entry__cpu_snprintf, 557 .se_width_idx = HISTC_CPU, 558 }; 559 560 /* --sort cgroup_id */ 561 562 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 563 { 564 return (int64_t)(right_dev - left_dev); 565 } 566 567 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 568 { 569 return (int64_t)(right_ino - left_ino); 570 } 571 572 static int64_t 573 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 574 { 575 int64_t ret; 576 577 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 578 if (ret != 0) 579 return ret; 580 581 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 582 left->cgroup_id.ino); 583 } 584 585 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 586 char *bf, size_t size, 587 unsigned int width __maybe_unused) 588 { 589 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 590 he->cgroup_id.ino); 591 } 592 593 struct sort_entry sort_cgroup_id = { 594 .se_header = "cgroup id (dev/inode)", 595 .se_cmp = sort__cgroup_id_cmp, 596 .se_snprintf = hist_entry__cgroup_id_snprintf, 597 .se_width_idx = HISTC_CGROUP_ID, 598 }; 599 600 /* --sort socket */ 601 602 static int64_t 603 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 604 { 605 return right->socket - left->socket; 606 } 607 608 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 609 size_t size, unsigned int width) 610 { 611 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 612 } 613 614 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 615 { 616 int sk = *(const int *)arg; 617 618 if (type != HIST_FILTER__SOCKET) 619 return -1; 620 621 return sk >= 0 && he->socket != sk; 622 } 623 624 struct sort_entry sort_socket = { 625 .se_header = "Socket", 626 .se_cmp = sort__socket_cmp, 627 .se_snprintf = hist_entry__socket_snprintf, 
628 .se_filter = hist_entry__socket_filter, 629 .se_width_idx = HISTC_SOCKET, 630 }; 631 632 /* --sort trace */ 633 634 static char *get_trace_output(struct hist_entry *he) 635 { 636 struct trace_seq seq; 637 struct perf_evsel *evsel; 638 struct pevent_record rec = { 639 .data = he->raw_data, 640 .size = he->raw_size, 641 }; 642 643 evsel = hists_to_evsel(he->hists); 644 645 trace_seq_init(&seq); 646 if (symbol_conf.raw_trace) { 647 pevent_print_fields(&seq, he->raw_data, he->raw_size, 648 evsel->tp_format); 649 } else { 650 pevent_event_info(&seq, evsel->tp_format, &rec); 651 } 652 /* 653 * Trim the buffer, it starts at 4KB and we're not going to 654 * add anything more to this buffer. 655 */ 656 return realloc(seq.buffer, seq.len + 1); 657 } 658 659 static int64_t 660 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 661 { 662 struct perf_evsel *evsel; 663 664 evsel = hists_to_evsel(left->hists); 665 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 666 return 0; 667 668 if (left->trace_output == NULL) 669 left->trace_output = get_trace_output(left); 670 if (right->trace_output == NULL) 671 right->trace_output = get_trace_output(right); 672 673 return strcmp(right->trace_output, left->trace_output); 674 } 675 676 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 677 size_t size, unsigned int width) 678 { 679 struct perf_evsel *evsel; 680 681 evsel = hists_to_evsel(he->hists); 682 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 683 return scnprintf(bf, size, "%-.*s", width, "N/A"); 684 685 if (he->trace_output == NULL) 686 he->trace_output = get_trace_output(he); 687 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 688 } 689 690 struct sort_entry sort_trace = { 691 .se_header = "Trace output", 692 .se_cmp = sort__trace_cmp, 693 .se_snprintf = hist_entry__trace_snprintf, 694 .se_width_idx = HISTC_TRACE, 695 }; 696 697 /* sort keys for branch stacks */ 698 699 static int64_t 700 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 701 { 702 if (!left->branch_info || !right->branch_info) 703 return cmp_null(left->branch_info, right->branch_info); 704 705 return _sort__dso_cmp(left->branch_info->from.map, 706 right->branch_info->from.map); 707 } 708 709 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 710 size_t size, unsigned int width) 711 { 712 if (he->branch_info) 713 return _hist_entry__dso_snprintf(he->branch_info->from.map, 714 bf, size, width); 715 else 716 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 717 } 718 719 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 720 const void *arg) 721 { 722 const struct dso *dso = arg; 723 724 if (type != HIST_FILTER__DSO) 725 return -1; 726 727 return dso && (!he->branch_info || !he->branch_info->from.map || 728 he->branch_info->from.map->dso != dso); 729 } 730 731 static int64_t 732 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 733 { 734 if (!left->branch_info || !right->branch_info) 735 return cmp_null(left->branch_info, right->branch_info); 736 737 return _sort__dso_cmp(left->branch_info->to.map, 738 right->branch_info->to.map); 739 } 740 741 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 742 size_t size, unsigned int width) 743 { 744 if (he->branch_info) 745 return _hist_entry__dso_snprintf(he->branch_info->to.map, 746 bf, size, width); 747 else 748 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 749 } 750 751 static int 
hist_entry__dso_to_filter(struct hist_entry *he, int type, 752 const void *arg) 753 { 754 const struct dso *dso = arg; 755 756 if (type != HIST_FILTER__DSO) 757 return -1; 758 759 return dso && (!he->branch_info || !he->branch_info->to.map || 760 he->branch_info->to.map->dso != dso); 761 } 762 763 static int64_t 764 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 765 { 766 struct addr_map_symbol *from_l = &left->branch_info->from; 767 struct addr_map_symbol *from_r = &right->branch_info->from; 768 769 if (!left->branch_info || !right->branch_info) 770 return cmp_null(left->branch_info, right->branch_info); 771 772 from_l = &left->branch_info->from; 773 from_r = &right->branch_info->from; 774 775 if (!from_l->sym && !from_r->sym) 776 return _sort__addr_cmp(from_l->addr, from_r->addr); 777 778 return _sort__sym_cmp(from_l->sym, from_r->sym); 779 } 780 781 static int64_t 782 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 783 { 784 struct addr_map_symbol *to_l, *to_r; 785 786 if (!left->branch_info || !right->branch_info) 787 return cmp_null(left->branch_info, right->branch_info); 788 789 to_l = &left->branch_info->to; 790 to_r = &right->branch_info->to; 791 792 if (!to_l->sym && !to_r->sym) 793 return _sort__addr_cmp(to_l->addr, to_r->addr); 794 795 return _sort__sym_cmp(to_l->sym, to_r->sym); 796 } 797 798 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 799 size_t size, unsigned int width) 800 { 801 if (he->branch_info) { 802 struct addr_map_symbol *from = &he->branch_info->from; 803 804 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 805 he->level, bf, size, width); 806 } 807 808 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 809 } 810 811 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 812 size_t size, unsigned int width) 813 { 814 if (he->branch_info) { 815 struct addr_map_symbol *to = &he->branch_info->to; 816 817 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 818 he->level, bf, size, width); 819 } 820 821 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 822 } 823 824 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 825 const void *arg) 826 { 827 const char *sym = arg; 828 829 if (type != HIST_FILTER__SYMBOL) 830 return -1; 831 832 return sym && !(he->branch_info && he->branch_info->from.sym && 833 strstr(he->branch_info->from.sym->name, sym)); 834 } 835 836 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 837 const void *arg) 838 { 839 const char *sym = arg; 840 841 if (type != HIST_FILTER__SYMBOL) 842 return -1; 843 844 return sym && !(he->branch_info && he->branch_info->to.sym && 845 strstr(he->branch_info->to.sym->name, sym)); 846 } 847 848 struct sort_entry sort_dso_from = { 849 .se_header = "Source Shared Object", 850 .se_cmp = sort__dso_from_cmp, 851 .se_snprintf = hist_entry__dso_from_snprintf, 852 .se_filter = hist_entry__dso_from_filter, 853 .se_width_idx = HISTC_DSO_FROM, 854 }; 855 856 struct sort_entry sort_dso_to = { 857 .se_header = "Target Shared Object", 858 .se_cmp = sort__dso_to_cmp, 859 .se_snprintf = hist_entry__dso_to_snprintf, 860 .se_filter = hist_entry__dso_to_filter, 861 .se_width_idx = HISTC_DSO_TO, 862 }; 863 864 struct sort_entry sort_sym_from = { 865 .se_header = "Source Symbol", 866 .se_cmp = sort__sym_from_cmp, 867 .se_snprintf = hist_entry__sym_from_snprintf, 868 .se_filter = hist_entry__sym_from_filter, 869 .se_width_idx = HISTC_SYMBOL_FROM, 870 }; 871 
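/*
 * Illustrative note (not part of the original source): the *_from/*_to sort
 * entries in this block pair up the source and target side of each sampled
 * branch. They are selected through the branch-mode sort order, e.g. the
 * default_branch_sort_order string defined at the top of this file, or
 * explicitly with something like
 * "perf report -b --sort dso_from,symbol_from,symbol_to" (assumed example
 * invocation).
 */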
872 struct sort_entry sort_sym_to = { 873 .se_header = "Target Symbol", 874 .se_cmp = sort__sym_to_cmp, 875 .se_snprintf = hist_entry__sym_to_snprintf, 876 .se_filter = hist_entry__sym_to_filter, 877 .se_width_idx = HISTC_SYMBOL_TO, 878 }; 879 880 static int64_t 881 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 882 { 883 unsigned char mp, p; 884 885 if (!left->branch_info || !right->branch_info) 886 return cmp_null(left->branch_info, right->branch_info); 887 888 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 889 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 890 return mp || p; 891 } 892 893 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 894 size_t size, unsigned int width){ 895 static const char *out = "N/A"; 896 897 if (he->branch_info) { 898 if (he->branch_info->flags.predicted) 899 out = "N"; 900 else if (he->branch_info->flags.mispred) 901 out = "Y"; 902 } 903 904 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 905 } 906 907 static int64_t 908 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 909 { 910 if (!left->branch_info || !right->branch_info) 911 return cmp_null(left->branch_info, right->branch_info); 912 913 return left->branch_info->flags.cycles - 914 right->branch_info->flags.cycles; 915 } 916 917 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 918 size_t size, unsigned int width) 919 { 920 if (!he->branch_info) 921 return scnprintf(bf, size, "%-.*s", width, "N/A"); 922 if (he->branch_info->flags.cycles == 0) 923 return repsep_snprintf(bf, size, "%-*s", width, "-"); 924 return repsep_snprintf(bf, size, "%-*hd", width, 925 he->branch_info->flags.cycles); 926 } 927 928 struct sort_entry sort_cycles = { 929 .se_header = "Basic Block Cycles", 930 .se_cmp = sort__cycles_cmp, 931 .se_snprintf = hist_entry__cycles_snprintf, 932 .se_width_idx = HISTC_CYCLES, 933 }; 934 935 /* --sort daddr_sym */ 936 int64_t 937 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 938 { 939 uint64_t l = 0, r = 0; 940 941 if (left->mem_info) 942 l = left->mem_info->daddr.addr; 943 if (right->mem_info) 944 r = right->mem_info->daddr.addr; 945 946 return (int64_t)(r - l); 947 } 948 949 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 950 size_t size, unsigned int width) 951 { 952 uint64_t addr = 0; 953 struct map *map = NULL; 954 struct symbol *sym = NULL; 955 956 if (he->mem_info) { 957 addr = he->mem_info->daddr.addr; 958 map = he->mem_info->daddr.map; 959 sym = he->mem_info->daddr.sym; 960 } 961 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 962 width); 963 } 964 965 int64_t 966 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 967 { 968 uint64_t l = 0, r = 0; 969 970 if (left->mem_info) 971 l = left->mem_info->iaddr.addr; 972 if (right->mem_info) 973 r = right->mem_info->iaddr.addr; 974 975 return (int64_t)(r - l); 976 } 977 978 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 979 size_t size, unsigned int width) 980 { 981 uint64_t addr = 0; 982 struct map *map = NULL; 983 struct symbol *sym = NULL; 984 985 if (he->mem_info) { 986 addr = he->mem_info->iaddr.addr; 987 map = he->mem_info->iaddr.map; 988 sym = he->mem_info->iaddr.sym; 989 } 990 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 991 width); 992 } 993 994 static int64_t 995 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 996 { 997 
struct map *map_l = NULL; 998 struct map *map_r = NULL; 999 1000 if (left->mem_info) 1001 map_l = left->mem_info->daddr.map; 1002 if (right->mem_info) 1003 map_r = right->mem_info->daddr.map; 1004 1005 return _sort__dso_cmp(map_l, map_r); 1006 } 1007 1008 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1009 size_t size, unsigned int width) 1010 { 1011 struct map *map = NULL; 1012 1013 if (he->mem_info) 1014 map = he->mem_info->daddr.map; 1015 1016 return _hist_entry__dso_snprintf(map, bf, size, width); 1017 } 1018 1019 static int64_t 1020 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1021 { 1022 union perf_mem_data_src data_src_l; 1023 union perf_mem_data_src data_src_r; 1024 1025 if (left->mem_info) 1026 data_src_l = left->mem_info->data_src; 1027 else 1028 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1029 1030 if (right->mem_info) 1031 data_src_r = right->mem_info->data_src; 1032 else 1033 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1034 1035 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1036 } 1037 1038 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1039 size_t size, unsigned int width) 1040 { 1041 char out[10]; 1042 1043 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1044 return repsep_snprintf(bf, size, "%.*s", width, out); 1045 } 1046 1047 static int64_t 1048 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1049 { 1050 union perf_mem_data_src data_src_l; 1051 union perf_mem_data_src data_src_r; 1052 1053 if (left->mem_info) 1054 data_src_l = left->mem_info->data_src; 1055 else 1056 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1057 1058 if (right->mem_info) 1059 data_src_r = right->mem_info->data_src; 1060 else 1061 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1062 1063 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1064 } 1065 1066 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1067 size_t size, unsigned int width) 1068 { 1069 char out[64]; 1070 1071 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1072 return repsep_snprintf(bf, size, "%-*s", width, out); 1073 } 1074 1075 static int64_t 1076 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1077 { 1078 union perf_mem_data_src data_src_l; 1079 union perf_mem_data_src data_src_r; 1080 1081 if (left->mem_info) 1082 data_src_l = left->mem_info->data_src; 1083 else 1084 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1085 1086 if (right->mem_info) 1087 data_src_r = right->mem_info->data_src; 1088 else 1089 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1090 1091 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1092 } 1093 1094 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1095 size_t size, unsigned int width) 1096 { 1097 char out[64]; 1098 1099 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1100 return repsep_snprintf(bf, size, "%-*s", width, out); 1101 } 1102 1103 static int64_t 1104 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1105 { 1106 union perf_mem_data_src data_src_l; 1107 union perf_mem_data_src data_src_r; 1108 1109 if (left->mem_info) 1110 data_src_l = left->mem_info->data_src; 1111 else 1112 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1113 1114 if (right->mem_info) 1115 data_src_r = right->mem_info->data_src; 1116 else 1117 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1118 1119 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1120 } 1121 1122 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1123 
size_t size, unsigned int width) 1124 { 1125 char out[64]; 1126 1127 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1128 return repsep_snprintf(bf, size, "%-*s", width, out); 1129 } 1130 1131 int64_t 1132 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1133 { 1134 u64 l, r; 1135 struct map *l_map, *r_map; 1136 1137 if (!left->mem_info) return -1; 1138 if (!right->mem_info) return 1; 1139 1140 /* group event types together */ 1141 if (left->cpumode > right->cpumode) return -1; 1142 if (left->cpumode < right->cpumode) return 1; 1143 1144 l_map = left->mem_info->daddr.map; 1145 r_map = right->mem_info->daddr.map; 1146 1147 /* if both are NULL, jump to sort on al_addr instead */ 1148 if (!l_map && !r_map) 1149 goto addr; 1150 1151 if (!l_map) return -1; 1152 if (!r_map) return 1; 1153 1154 if (l_map->maj > r_map->maj) return -1; 1155 if (l_map->maj < r_map->maj) return 1; 1156 1157 if (l_map->min > r_map->min) return -1; 1158 if (l_map->min < r_map->min) return 1; 1159 1160 if (l_map->ino > r_map->ino) return -1; 1161 if (l_map->ino < r_map->ino) return 1; 1162 1163 if (l_map->ino_generation > r_map->ino_generation) return -1; 1164 if (l_map->ino_generation < r_map->ino_generation) return 1; 1165 1166 /* 1167 * Addresses with no major/minor numbers are assumed to be 1168 * anonymous in userspace. Sort those on pid then address. 1169 * 1170 * The kernel and non-zero major/minor mapped areas are 1171 * assumed to be unity mapped. Sort those on address. 1172 */ 1173 1174 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1175 (!(l_map->flags & MAP_SHARED)) && 1176 !l_map->maj && !l_map->min && !l_map->ino && 1177 !l_map->ino_generation) { 1178 /* userspace anonymous */ 1179 1180 if (left->thread->pid_ > right->thread->pid_) return -1; 1181 if (left->thread->pid_ < right->thread->pid_) return 1; 1182 } 1183 1184 addr: 1185 /* al_addr does all the right addr - start + offset calculations */ 1186 l = cl_address(left->mem_info->daddr.al_addr); 1187 r = cl_address(right->mem_info->daddr.al_addr); 1188 1189 if (l > r) return -1; 1190 if (l < r) return 1; 1191 1192 return 0; 1193 } 1194 1195 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1196 size_t size, unsigned int width) 1197 { 1198 1199 uint64_t addr = 0; 1200 struct map *map = NULL; 1201 struct symbol *sym = NULL; 1202 char level = he->level; 1203 1204 if (he->mem_info) { 1205 addr = cl_address(he->mem_info->daddr.al_addr); 1206 map = he->mem_info->daddr.map; 1207 sym = he->mem_info->daddr.sym; 1208 1209 /* print [s] for shared data mmaps */ 1210 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1211 map && (map->type == MAP__VARIABLE) && 1212 (map->flags & MAP_SHARED) && 1213 (map->maj || map->min || map->ino || 1214 map->ino_generation)) 1215 level = 's'; 1216 else if (!map) 1217 level = 'X'; 1218 } 1219 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1220 width); 1221 } 1222 1223 struct sort_entry sort_mispredict = { 1224 .se_header = "Branch Mispredicted", 1225 .se_cmp = sort__mispredict_cmp, 1226 .se_snprintf = hist_entry__mispredict_snprintf, 1227 .se_width_idx = HISTC_MISPREDICT, 1228 }; 1229 1230 static u64 he_weight(struct hist_entry *he) 1231 { 1232 return he->stat.nr_events ? 
he->stat.weight / he->stat.nr_events : 0; 1233 } 1234 1235 static int64_t 1236 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1237 { 1238 return he_weight(left) - he_weight(right); 1239 } 1240 1241 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1242 size_t size, unsigned int width) 1243 { 1244 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1245 } 1246 1247 struct sort_entry sort_local_weight = { 1248 .se_header = "Local Weight", 1249 .se_cmp = sort__local_weight_cmp, 1250 .se_snprintf = hist_entry__local_weight_snprintf, 1251 .se_width_idx = HISTC_LOCAL_WEIGHT, 1252 }; 1253 1254 static int64_t 1255 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1256 { 1257 return left->stat.weight - right->stat.weight; 1258 } 1259 1260 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1261 size_t size, unsigned int width) 1262 { 1263 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1264 } 1265 1266 struct sort_entry sort_global_weight = { 1267 .se_header = "Weight", 1268 .se_cmp = sort__global_weight_cmp, 1269 .se_snprintf = hist_entry__global_weight_snprintf, 1270 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1271 }; 1272 1273 struct sort_entry sort_mem_daddr_sym = { 1274 .se_header = "Data Symbol", 1275 .se_cmp = sort__daddr_cmp, 1276 .se_snprintf = hist_entry__daddr_snprintf, 1277 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1278 }; 1279 1280 struct sort_entry sort_mem_iaddr_sym = { 1281 .se_header = "Code Symbol", 1282 .se_cmp = sort__iaddr_cmp, 1283 .se_snprintf = hist_entry__iaddr_snprintf, 1284 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1285 }; 1286 1287 struct sort_entry sort_mem_daddr_dso = { 1288 .se_header = "Data Object", 1289 .se_cmp = sort__dso_daddr_cmp, 1290 .se_snprintf = hist_entry__dso_daddr_snprintf, 1291 .se_width_idx = HISTC_MEM_DADDR_DSO, 1292 }; 1293 1294 struct sort_entry sort_mem_locked = { 1295 .se_header = "Locked", 1296 .se_cmp = sort__locked_cmp, 1297 .se_snprintf = hist_entry__locked_snprintf, 1298 .se_width_idx = HISTC_MEM_LOCKED, 1299 }; 1300 1301 struct sort_entry sort_mem_tlb = { 1302 .se_header = "TLB access", 1303 .se_cmp = sort__tlb_cmp, 1304 .se_snprintf = hist_entry__tlb_snprintf, 1305 .se_width_idx = HISTC_MEM_TLB, 1306 }; 1307 1308 struct sort_entry sort_mem_lvl = { 1309 .se_header = "Memory access", 1310 .se_cmp = sort__lvl_cmp, 1311 .se_snprintf = hist_entry__lvl_snprintf, 1312 .se_width_idx = HISTC_MEM_LVL, 1313 }; 1314 1315 struct sort_entry sort_mem_snoop = { 1316 .se_header = "Snoop", 1317 .se_cmp = sort__snoop_cmp, 1318 .se_snprintf = hist_entry__snoop_snprintf, 1319 .se_width_idx = HISTC_MEM_SNOOP, 1320 }; 1321 1322 struct sort_entry sort_mem_dcacheline = { 1323 .se_header = "Data Cacheline", 1324 .se_cmp = sort__dcacheline_cmp, 1325 .se_snprintf = hist_entry__dcacheline_snprintf, 1326 .se_width_idx = HISTC_MEM_DCACHELINE, 1327 }; 1328 1329 static int64_t 1330 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1331 { 1332 uint64_t l = 0, r = 0; 1333 1334 if (left->mem_info) 1335 l = left->mem_info->daddr.phys_addr; 1336 if (right->mem_info) 1337 r = right->mem_info->daddr.phys_addr; 1338 1339 return (int64_t)(r - l); 1340 } 1341 1342 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1343 size_t size, unsigned int width) 1344 { 1345 uint64_t addr = 0; 1346 size_t ret = 0; 1347 size_t len = BITS_PER_LONG / 4; 1348 1349 addr = he->mem_info->daddr.phys_addr; 1350 1351 ret += 
repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1352 1353 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1354 1355 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1356 1357 if (ret > width) 1358 bf[width] = '\0'; 1359 1360 return width; 1361 } 1362 1363 struct sort_entry sort_mem_phys_daddr = { 1364 .se_header = "Data Physical Address", 1365 .se_cmp = sort__phys_daddr_cmp, 1366 .se_snprintf = hist_entry__phys_daddr_snprintf, 1367 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1368 }; 1369 1370 static int64_t 1371 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1372 { 1373 if (!left->branch_info || !right->branch_info) 1374 return cmp_null(left->branch_info, right->branch_info); 1375 1376 return left->branch_info->flags.abort != 1377 right->branch_info->flags.abort; 1378 } 1379 1380 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1381 size_t size, unsigned int width) 1382 { 1383 static const char *out = "N/A"; 1384 1385 if (he->branch_info) { 1386 if (he->branch_info->flags.abort) 1387 out = "A"; 1388 else 1389 out = "."; 1390 } 1391 1392 return repsep_snprintf(bf, size, "%-*s", width, out); 1393 } 1394 1395 struct sort_entry sort_abort = { 1396 .se_header = "Transaction abort", 1397 .se_cmp = sort__abort_cmp, 1398 .se_snprintf = hist_entry__abort_snprintf, 1399 .se_width_idx = HISTC_ABORT, 1400 }; 1401 1402 static int64_t 1403 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1404 { 1405 if (!left->branch_info || !right->branch_info) 1406 return cmp_null(left->branch_info, right->branch_info); 1407 1408 return left->branch_info->flags.in_tx != 1409 right->branch_info->flags.in_tx; 1410 } 1411 1412 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1413 size_t size, unsigned int width) 1414 { 1415 static const char *out = "N/A"; 1416 1417 if (he->branch_info) { 1418 if (he->branch_info->flags.in_tx) 1419 out = "T"; 1420 else 1421 out = "."; 1422 } 1423 1424 return repsep_snprintf(bf, size, "%-*s", width, out); 1425 } 1426 1427 struct sort_entry sort_in_tx = { 1428 .se_header = "Branch in transaction", 1429 .se_cmp = sort__in_tx_cmp, 1430 .se_snprintf = hist_entry__in_tx_snprintf, 1431 .se_width_idx = HISTC_IN_TX, 1432 }; 1433 1434 static int64_t 1435 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1436 { 1437 return left->transaction - right->transaction; 1438 } 1439 1440 static inline char *add_str(char *p, const char *str) 1441 { 1442 strcpy(p, str); 1443 return p + strlen(str); 1444 } 1445 1446 static struct txbit { 1447 unsigned flag; 1448 const char *name; 1449 int skip_for_len; 1450 } txbits[] = { 1451 { PERF_TXN_ELISION, "EL ", 0 }, 1452 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1453 { PERF_TXN_SYNC, "SYNC ", 1 }, 1454 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1455 { PERF_TXN_RETRY, "RETRY ", 0 }, 1456 { PERF_TXN_CONFLICT, "CON ", 0 }, 1457 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1458 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1459 { 0, NULL, 0 } 1460 }; 1461 1462 int hist_entry__transaction_len(void) 1463 { 1464 int i; 1465 int len = 0; 1466 1467 for (i = 0; txbits[i].name; i++) { 1468 if (!txbits[i].skip_for_len) 1469 len += strlen(txbits[i].name); 1470 } 1471 len += 4; /* :XX<space> */ 1472 return len; 1473 } 1474 1475 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1476 size_t size, unsigned int width) 1477 { 1478 u64 t = he->transaction; 1479 char buf[128]; 1480 char *p = buf; 1481 int i; 1482 1483 buf[0] = 0; 
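	/*
	 * Append the name of every transaction flag set in he->transaction,
	 * using the txbits table above; any abort reason code is appended at
	 * the end as ":<hex>".
	 */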
1484 for (i = 0; txbits[i].name; i++) 1485 if (txbits[i].flag & t) 1486 p = add_str(p, txbits[i].name); 1487 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1488 p = add_str(p, "NEITHER "); 1489 if (t & PERF_TXN_ABORT_MASK) { 1490 sprintf(p, ":%" PRIx64, 1491 (t & PERF_TXN_ABORT_MASK) >> 1492 PERF_TXN_ABORT_SHIFT); 1493 p += strlen(p); 1494 } 1495 1496 return repsep_snprintf(bf, size, "%-*s", width, buf); 1497 } 1498 1499 struct sort_entry sort_transaction = { 1500 .se_header = "Transaction ", 1501 .se_cmp = sort__transaction_cmp, 1502 .se_snprintf = hist_entry__transaction_snprintf, 1503 .se_width_idx = HISTC_TRANSACTION, 1504 }; 1505 1506 /* --sort symbol_size */ 1507 1508 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1509 { 1510 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1511 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1512 1513 return size_l < size_r ? -1 : 1514 size_l == size_r ? 0 : 1; 1515 } 1516 1517 static int64_t 1518 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1519 { 1520 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1521 } 1522 1523 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1524 size_t bf_size, unsigned int width) 1525 { 1526 if (sym) 1527 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1528 1529 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1530 } 1531 1532 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1533 size_t size, unsigned int width) 1534 { 1535 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1536 } 1537 1538 struct sort_entry sort_sym_size = { 1539 .se_header = "Symbol size", 1540 .se_cmp = sort__sym_size_cmp, 1541 .se_snprintf = hist_entry__sym_size_snprintf, 1542 .se_width_idx = HISTC_SYM_SIZE, 1543 }; 1544 1545 1546 struct sort_dimension { 1547 const char *name; 1548 struct sort_entry *entry; 1549 int taken; 1550 }; 1551 1552 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1553 1554 static struct sort_dimension common_sort_dimensions[] = { 1555 DIM(SORT_PID, "pid", sort_thread), 1556 DIM(SORT_COMM, "comm", sort_comm), 1557 DIM(SORT_DSO, "dso", sort_dso), 1558 DIM(SORT_SYM, "symbol", sort_sym), 1559 DIM(SORT_PARENT, "parent", sort_parent), 1560 DIM(SORT_CPU, "cpu", sort_cpu), 1561 DIM(SORT_SOCKET, "socket", sort_socket), 1562 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1563 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1564 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1565 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1566 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1567 DIM(SORT_TRACE, "trace", sort_trace), 1568 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1569 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1570 }; 1571 1572 #undef DIM 1573 1574 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1575 1576 static struct sort_dimension bstack_sort_dimensions[] = { 1577 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1578 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1579 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1580 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1581 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1582 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1583 DIM(SORT_ABORT, "abort", sort_abort), 1584 DIM(SORT_CYCLES, "cycles", sort_cycles), 1585 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1586 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 
1587 }; 1588 1589 #undef DIM 1590 1591 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1592 1593 static struct sort_dimension memory_sort_dimensions[] = { 1594 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1595 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1596 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1597 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1598 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1599 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1600 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1601 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1602 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1603 }; 1604 1605 #undef DIM 1606 1607 struct hpp_dimension { 1608 const char *name; 1609 struct perf_hpp_fmt *fmt; 1610 int taken; 1611 }; 1612 1613 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1614 1615 static struct hpp_dimension hpp_sort_dimensions[] = { 1616 DIM(PERF_HPP__OVERHEAD, "overhead"), 1617 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1618 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1619 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1620 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1621 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1622 DIM(PERF_HPP__SAMPLES, "sample"), 1623 DIM(PERF_HPP__PERIOD, "period"), 1624 }; 1625 1626 #undef DIM 1627 1628 struct hpp_sort_entry { 1629 struct perf_hpp_fmt hpp; 1630 struct sort_entry *se; 1631 }; 1632 1633 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1634 { 1635 struct hpp_sort_entry *hse; 1636 1637 if (!perf_hpp__is_sort_entry(fmt)) 1638 return; 1639 1640 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1641 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1642 } 1643 1644 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1645 struct hists *hists, int line __maybe_unused, 1646 int *span __maybe_unused) 1647 { 1648 struct hpp_sort_entry *hse; 1649 size_t len = fmt->user_len; 1650 1651 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1652 1653 if (!len) 1654 len = hists__col_len(hists, hse->se->se_width_idx); 1655 1656 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1657 } 1658 1659 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1660 struct perf_hpp *hpp __maybe_unused, 1661 struct hists *hists) 1662 { 1663 struct hpp_sort_entry *hse; 1664 size_t len = fmt->user_len; 1665 1666 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1667 1668 if (!len) 1669 len = hists__col_len(hists, hse->se->se_width_idx); 1670 1671 return len; 1672 } 1673 1674 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1675 struct hist_entry *he) 1676 { 1677 struct hpp_sort_entry *hse; 1678 size_t len = fmt->user_len; 1679 1680 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1681 1682 if (!len) 1683 len = hists__col_len(he->hists, hse->se->se_width_idx); 1684 1685 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1686 } 1687 1688 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1689 struct hist_entry *a, struct hist_entry *b) 1690 { 1691 struct hpp_sort_entry *hse; 1692 1693 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1694 return hse->se->se_cmp(a, b); 1695 } 1696 1697 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1698 struct hist_entry *a, struct hist_entry *b) 1699 { 1700 struct hpp_sort_entry *hse; 1701 int64_t 
(*collapse_fn)(struct hist_entry *, struct hist_entry *); 1702 1703 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1704 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1705 return collapse_fn(a, b); 1706 } 1707 1708 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1709 struct hist_entry *a, struct hist_entry *b) 1710 { 1711 struct hpp_sort_entry *hse; 1712 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1713 1714 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1715 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1716 return sort_fn(a, b); 1717 } 1718 1719 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1720 { 1721 return format->header == __sort__hpp_header; 1722 } 1723 1724 #define MK_SORT_ENTRY_CHK(key) \ 1725 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1726 { \ 1727 struct hpp_sort_entry *hse; \ 1728 \ 1729 if (!perf_hpp__is_sort_entry(fmt)) \ 1730 return false; \ 1731 \ 1732 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1733 return hse->se == &sort_ ## key ; \ 1734 } 1735 1736 MK_SORT_ENTRY_CHK(trace) 1737 MK_SORT_ENTRY_CHK(srcline) 1738 MK_SORT_ENTRY_CHK(srcfile) 1739 MK_SORT_ENTRY_CHK(thread) 1740 MK_SORT_ENTRY_CHK(comm) 1741 MK_SORT_ENTRY_CHK(dso) 1742 MK_SORT_ENTRY_CHK(sym) 1743 1744 1745 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1746 { 1747 struct hpp_sort_entry *hse_a; 1748 struct hpp_sort_entry *hse_b; 1749 1750 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1751 return false; 1752 1753 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1754 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1755 1756 return hse_a->se == hse_b->se; 1757 } 1758 1759 static void hse_free(struct perf_hpp_fmt *fmt) 1760 { 1761 struct hpp_sort_entry *hse; 1762 1763 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1764 free(hse); 1765 } 1766 1767 static struct hpp_sort_entry * 1768 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1769 { 1770 struct hpp_sort_entry *hse; 1771 1772 hse = malloc(sizeof(*hse)); 1773 if (hse == NULL) { 1774 pr_err("Memory allocation failed\n"); 1775 return NULL; 1776 } 1777 1778 hse->se = sd->entry; 1779 hse->hpp.name = sd->entry->se_header; 1780 hse->hpp.header = __sort__hpp_header; 1781 hse->hpp.width = __sort__hpp_width; 1782 hse->hpp.entry = __sort__hpp_entry; 1783 hse->hpp.color = NULL; 1784 1785 hse->hpp.cmp = __sort__hpp_cmp; 1786 hse->hpp.collapse = __sort__hpp_collapse; 1787 hse->hpp.sort = __sort__hpp_sort; 1788 hse->hpp.equal = __sort__hpp_equal; 1789 hse->hpp.free = hse_free; 1790 1791 INIT_LIST_HEAD(&hse->hpp.list); 1792 INIT_LIST_HEAD(&hse->hpp.sort_list); 1793 hse->hpp.elide = false; 1794 hse->hpp.len = 0; 1795 hse->hpp.user_len = 0; 1796 hse->hpp.level = level; 1797 1798 return hse; 1799 } 1800 1801 static void hpp_free(struct perf_hpp_fmt *fmt) 1802 { 1803 free(fmt); 1804 } 1805 1806 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1807 int level) 1808 { 1809 struct perf_hpp_fmt *fmt; 1810 1811 fmt = memdup(hd->fmt, sizeof(*fmt)); 1812 if (fmt) { 1813 INIT_LIST_HEAD(&fmt->list); 1814 INIT_LIST_HEAD(&fmt->sort_list); 1815 fmt->free = hpp_free; 1816 fmt->level = level; 1817 } 1818 1819 return fmt; 1820 } 1821 1822 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1823 { 1824 struct perf_hpp_fmt *fmt; 1825 struct hpp_sort_entry *hse; 1826 int ret = -1; 1827 int r; 1828 1829 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1830 if 
(!perf_hpp__is_sort_entry(fmt)) 1831 continue; 1832 1833 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1834 if (hse->se->se_filter == NULL) 1835 continue; 1836 1837 /* 1838 * hist entry is filtered if any of sort key in the hpp list 1839 * is applied. But it should skip non-matched filter types. 1840 */ 1841 r = hse->se->se_filter(he, type, arg); 1842 if (r >= 0) { 1843 if (ret < 0) 1844 ret = 0; 1845 ret |= r; 1846 } 1847 } 1848 1849 return ret; 1850 } 1851 1852 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1853 struct perf_hpp_list *list, 1854 int level) 1855 { 1856 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1857 1858 if (hse == NULL) 1859 return -1; 1860 1861 perf_hpp_list__register_sort_field(list, &hse->hpp); 1862 return 0; 1863 } 1864 1865 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1866 struct perf_hpp_list *list) 1867 { 1868 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1869 1870 if (hse == NULL) 1871 return -1; 1872 1873 perf_hpp_list__column_register(list, &hse->hpp); 1874 return 0; 1875 } 1876 1877 struct hpp_dynamic_entry { 1878 struct perf_hpp_fmt hpp; 1879 struct perf_evsel *evsel; 1880 struct format_field *field; 1881 unsigned dynamic_len; 1882 bool raw_trace; 1883 }; 1884 1885 static int hde_width(struct hpp_dynamic_entry *hde) 1886 { 1887 if (!hde->hpp.len) { 1888 int len = hde->dynamic_len; 1889 int namelen = strlen(hde->field->name); 1890 int fieldlen = hde->field->size; 1891 1892 if (namelen > len) 1893 len = namelen; 1894 1895 if (!(hde->field->flags & FIELD_IS_STRING)) { 1896 /* length for print hex numbers */ 1897 fieldlen = hde->field->size * 2 + 2; 1898 } 1899 if (fieldlen > len) 1900 len = fieldlen; 1901 1902 hde->hpp.len = len; 1903 } 1904 return hde->hpp.len; 1905 } 1906 1907 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 1908 struct hist_entry *he) 1909 { 1910 char *str, *pos; 1911 struct format_field *field = hde->field; 1912 size_t namelen; 1913 bool last = false; 1914 1915 if (hde->raw_trace) 1916 return; 1917 1918 /* parse pretty print result and update max length */ 1919 if (!he->trace_output) 1920 he->trace_output = get_trace_output(he); 1921 1922 namelen = strlen(field->name); 1923 str = he->trace_output; 1924 1925 while (str) { 1926 pos = strchr(str, ' '); 1927 if (pos == NULL) { 1928 last = true; 1929 pos = str + strlen(str); 1930 } 1931 1932 if (!strncmp(str, field->name, namelen)) { 1933 size_t len; 1934 1935 str += namelen + 1; 1936 len = pos - str; 1937 1938 if (len > hde->dynamic_len) 1939 hde->dynamic_len = len; 1940 break; 1941 } 1942 1943 if (last) 1944 str = NULL; 1945 else 1946 str = pos + 1; 1947 } 1948 } 1949 1950 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1951 struct hists *hists __maybe_unused, 1952 int line __maybe_unused, 1953 int *span __maybe_unused) 1954 { 1955 struct hpp_dynamic_entry *hde; 1956 size_t len = fmt->user_len; 1957 1958 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1959 1960 if (!len) 1961 len = hde_width(hde); 1962 1963 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 1964 } 1965 1966 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 1967 struct perf_hpp *hpp __maybe_unused, 1968 struct hists *hists __maybe_unused) 1969 { 1970 struct hpp_dynamic_entry *hde; 1971 size_t len = fmt->user_len; 1972 1973 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1974 1975 if (!len) 1976 len = hde_width(hde); 1977 1978 return len; 1979 } 
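/*
 * Illustrative note (not part of the original source): a dynamic entry backs
 * a tracepoint field requested by name in the sort order, e.g. something
 * like "perf report --sort comm,sched:sched_switch.next_comm" (assumed
 * example invocation). parse_field_name() and find_evsel() below resolve the
 * event and field, and one hpp_dynamic_entry is allocated per matched field.
 */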
1980 1981 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 1982 { 1983 struct hpp_dynamic_entry *hde; 1984 1985 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1986 1987 return hists_to_evsel(hists) == hde->evsel; 1988 } 1989 1990 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1991 struct hist_entry *he) 1992 { 1993 struct hpp_dynamic_entry *hde; 1994 size_t len = fmt->user_len; 1995 char *str, *pos; 1996 struct format_field *field; 1997 size_t namelen; 1998 bool last = false; 1999 int ret; 2000 2001 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2002 2003 if (!len) 2004 len = hde_width(hde); 2005 2006 if (hde->raw_trace) 2007 goto raw_field; 2008 2009 if (!he->trace_output) 2010 he->trace_output = get_trace_output(he); 2011 2012 field = hde->field; 2013 namelen = strlen(field->name); 2014 str = he->trace_output; 2015 2016 while (str) { 2017 pos = strchr(str, ' '); 2018 if (pos == NULL) { 2019 last = true; 2020 pos = str + strlen(str); 2021 } 2022 2023 if (!strncmp(str, field->name, namelen)) { 2024 str += namelen + 1; 2025 str = strndup(str, pos - str); 2026 2027 if (str == NULL) 2028 return scnprintf(hpp->buf, hpp->size, 2029 "%*.*s", len, len, "ERROR"); 2030 break; 2031 } 2032 2033 if (last) 2034 str = NULL; 2035 else 2036 str = pos + 1; 2037 } 2038 2039 if (str == NULL) { 2040 struct trace_seq seq; 2041 raw_field: 2042 trace_seq_init(&seq); 2043 pevent_print_field(&seq, he->raw_data, hde->field); 2044 str = seq.buffer; 2045 } 2046 2047 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2048 free(str); 2049 return ret; 2050 } 2051 2052 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2053 struct hist_entry *a, struct hist_entry *b) 2054 { 2055 struct hpp_dynamic_entry *hde; 2056 struct format_field *field; 2057 unsigned offset, size; 2058 2059 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2060 2061 if (b == NULL) { 2062 update_dynamic_len(hde, a); 2063 return 0; 2064 } 2065 2066 field = hde->field; 2067 if (field->flags & FIELD_IS_DYNAMIC) { 2068 unsigned long long dyn; 2069 2070 pevent_read_number_field(field, a->raw_data, &dyn); 2071 offset = dyn & 0xffff; 2072 size = (dyn >> 16) & 0xffff; 2073 2074 /* record max width for output */ 2075 if (size > hde->dynamic_len) 2076 hde->dynamic_len = size; 2077 } else { 2078 offset = field->offset; 2079 size = field->size; 2080 } 2081 2082 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2083 } 2084 2085 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2086 { 2087 return fmt->cmp == __sort__hde_cmp; 2088 } 2089 2090 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2091 { 2092 struct hpp_dynamic_entry *hde_a; 2093 struct hpp_dynamic_entry *hde_b; 2094 2095 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2096 return false; 2097 2098 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2099 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2100 2101 return hde_a->field == hde_b->field; 2102 } 2103 2104 static void hde_free(struct perf_hpp_fmt *fmt) 2105 { 2106 struct hpp_dynamic_entry *hde; 2107 2108 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2109 free(hde); 2110 } 2111 2112 static struct hpp_dynamic_entry * 2113 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field, 2114 int level) 2115 { 2116 struct hpp_dynamic_entry *hde; 2117 2118 hde = malloc(sizeof(*hde)); 2119 if (hde == NULL) { 2120 pr_debug("Memory allocation 
failed\n"); 2121 return NULL; 2122 } 2123 2124 hde->evsel = evsel; 2125 hde->field = field; 2126 hde->dynamic_len = 0; 2127 2128 hde->hpp.name = field->name; 2129 hde->hpp.header = __sort__hde_header; 2130 hde->hpp.width = __sort__hde_width; 2131 hde->hpp.entry = __sort__hde_entry; 2132 hde->hpp.color = NULL; 2133 2134 hde->hpp.cmp = __sort__hde_cmp; 2135 hde->hpp.collapse = __sort__hde_cmp; 2136 hde->hpp.sort = __sort__hde_cmp; 2137 hde->hpp.equal = __sort__hde_equal; 2138 hde->hpp.free = hde_free; 2139 2140 INIT_LIST_HEAD(&hde->hpp.list); 2141 INIT_LIST_HEAD(&hde->hpp.sort_list); 2142 hde->hpp.elide = false; 2143 hde->hpp.len = 0; 2144 hde->hpp.user_len = 0; 2145 hde->hpp.level = level; 2146 2147 return hde; 2148 } 2149 2150 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2151 { 2152 struct perf_hpp_fmt *new_fmt = NULL; 2153 2154 if (perf_hpp__is_sort_entry(fmt)) { 2155 struct hpp_sort_entry *hse, *new_hse; 2156 2157 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2158 new_hse = memdup(hse, sizeof(*hse)); 2159 if (new_hse) 2160 new_fmt = &new_hse->hpp; 2161 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2162 struct hpp_dynamic_entry *hde, *new_hde; 2163 2164 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2165 new_hde = memdup(hde, sizeof(*hde)); 2166 if (new_hde) 2167 new_fmt = &new_hde->hpp; 2168 } else { 2169 new_fmt = memdup(fmt, sizeof(*fmt)); 2170 } 2171 2172 INIT_LIST_HEAD(&new_fmt->list); 2173 INIT_LIST_HEAD(&new_fmt->sort_list); 2174 2175 return new_fmt; 2176 } 2177 2178 static int parse_field_name(char *str, char **event, char **field, char **opt) 2179 { 2180 char *event_name, *field_name, *opt_name; 2181 2182 event_name = str; 2183 field_name = strchr(str, '.'); 2184 2185 if (field_name) { 2186 *field_name++ = '\0'; 2187 } else { 2188 event_name = NULL; 2189 field_name = str; 2190 } 2191 2192 opt_name = strchr(field_name, '/'); 2193 if (opt_name) 2194 *opt_name++ = '\0'; 2195 2196 *event = event_name; 2197 *field = field_name; 2198 *opt = opt_name; 2199 2200 return 0; 2201 } 2202 2203 /* find match evsel using a given event name. The event name can be: 2204 * 1. '%' + event index (e.g. '%1' for first event) 2205 * 2. full event name (e.g. sched:sched_switch) 2206 * 3. 
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* find a matching evsel using a given event name. The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
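
/*
 * Add dynamic (tracepoint field) sort keys for the given --sort token:
 * "trace_fields" adds every field of every tracepoint event,
 * "<event>.*" adds every field of one event, a bare field name matches
 * all events that have it, and a "/raw" suffix disables pretty printing.
 */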
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
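
/*
 * Look up @tok in the common, hpp, branch-stack and memory dimension
 * tables in turn; if nothing matches, try it as a dynamic tracepoint
 * field.  Returns 0 on success, -EINVAL for a key that is not valid in
 * the current sort mode and -ESRCH for an unknown key.
 */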
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference among
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their IPs, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
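
/*
 * Pick the default sort order for the current sort mode.  If every event
 * in @evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT so the
 * pretty-printed trace output (or the raw trace fields) is used instead.
 */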
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds the 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
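
/*
 * Build the sort key list for perf_hpp_list from --sort (or the default
 * order for the current mode), prepending the overhead keys unless a
 * strict --fields order was given.
 */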
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
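
/*
 * Elide a column when its filter list has exactly one entry, since every
 * row would show the same value; if that would hide all sort columns,
 * un-elide everything so something is still displayed.
 */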
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
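
/*
 * Top-level setup: build the sort keys and output fields in perf_hpp_list
 * from --sort/--fields and the current sort mode.
 */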
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}