1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <sys/mman.h> 6 #include "sort.h" 7 #include "hist.h" 8 #include "comm.h" 9 #include "symbol.h" 10 #include "thread.h" 11 #include "evsel.h" 12 #include "evlist.h" 13 #include "strlist.h" 14 #include <traceevent/event-parse.h> 15 #include "mem-events.h" 16 #include <linux/kernel.h> 17 18 regex_t parent_regex; 19 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 20 const char *parent_pattern = default_parent_pattern; 21 const char *default_sort_order = "comm,dso,symbol"; 22 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 23 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 24 const char default_top_sort_order[] = "dso,symbol"; 25 const char default_diff_sort_order[] = "dso,symbol"; 26 const char default_tracepoint_sort_order[] = "trace"; 27 const char *sort_order; 28 const char *field_order; 29 regex_t ignore_callees_regex; 30 int have_ignore_callees = 0; 31 enum sort_mode sort__mode = SORT_MODE__NORMAL; 32 33 /* 34 * Replaces all occurrences of a char used with the: 35 * 36 * -t, --field-separator 37 * 38 * option, that uses a special separator character and don't pad with spaces, 39 * replacing all occurances of this separator in symbol names (and other 40 * output) with a '.' character, that thus it's the only non valid separator. 41 */ 42 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 43 { 44 int n; 45 va_list ap; 46 47 va_start(ap, fmt); 48 n = vsnprintf(bf, size, fmt, ap); 49 if (symbol_conf.field_sep && n > 0) { 50 char *sep = bf; 51 52 while (1) { 53 sep = strchr(sep, *symbol_conf.field_sep); 54 if (sep == NULL) 55 break; 56 *sep = '.'; 57 } 58 } 59 va_end(ap); 60 61 if (n >= (int)size) 62 return size - 1; 63 return n; 64 } 65 66 static int64_t cmp_null(const void *l, const void *r) 67 { 68 if (!l && !r) 69 return 0; 70 else if (!l) 71 return -1; 72 else 73 return 1; 74 } 75 76 /* --sort pid */ 77 78 static int64_t 79 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 80 { 81 return right->thread->tid - left->thread->tid; 82 } 83 84 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 85 size_t size, unsigned int width) 86 { 87 const char *comm = thread__comm_str(he->thread); 88 89 width = max(7U, width) - 8; 90 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 91 width, width, comm ?: ""); 92 } 93 94 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 95 { 96 const struct thread *th = arg; 97 98 if (type != HIST_FILTER__THREAD) 99 return -1; 100 101 return th && he->thread != th; 102 } 103 104 struct sort_entry sort_thread = { 105 .se_header = " Pid:Command", 106 .se_cmp = sort__thread_cmp, 107 .se_snprintf = hist_entry__thread_snprintf, 108 .se_filter = hist_entry__thread_filter, 109 .se_width_idx = HISTC_THREAD, 110 }; 111 112 /* --sort comm */ 113 114 static int64_t 115 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 116 { 117 /* Compare the addr that should be unique among comm */ 118 return strcmp(comm__str(right->comm), comm__str(left->comm)); 119 } 120 121 static int64_t 122 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 123 { 124 /* Compare the addr that should be unique among comm */ 125 return strcmp(comm__str(right->comm), comm__str(left->comm)); 126 } 127 128 static int64_t 129 sort__comm_sort(struct 
hist_entry *left, struct hist_entry *right) 130 { 131 return strcmp(comm__str(right->comm), comm__str(left->comm)); 132 } 133 134 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 135 size_t size, unsigned int width) 136 { 137 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 138 } 139 140 struct sort_entry sort_comm = { 141 .se_header = "Command", 142 .se_cmp = sort__comm_cmp, 143 .se_collapse = sort__comm_collapse, 144 .se_sort = sort__comm_sort, 145 .se_snprintf = hist_entry__comm_snprintf, 146 .se_filter = hist_entry__thread_filter, 147 .se_width_idx = HISTC_COMM, 148 }; 149 150 /* --sort dso */ 151 152 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 153 { 154 struct dso *dso_l = map_l ? map_l->dso : NULL; 155 struct dso *dso_r = map_r ? map_r->dso : NULL; 156 const char *dso_name_l, *dso_name_r; 157 158 if (!dso_l || !dso_r) 159 return cmp_null(dso_r, dso_l); 160 161 if (verbose > 0) { 162 dso_name_l = dso_l->long_name; 163 dso_name_r = dso_r->long_name; 164 } else { 165 dso_name_l = dso_l->short_name; 166 dso_name_r = dso_r->short_name; 167 } 168 169 return strcmp(dso_name_l, dso_name_r); 170 } 171 172 static int64_t 173 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 174 { 175 return _sort__dso_cmp(right->ms.map, left->ms.map); 176 } 177 178 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 179 size_t size, unsigned int width) 180 { 181 if (map && map->dso) { 182 const char *dso_name = verbose > 0 ? map->dso->long_name : 183 map->dso->short_name; 184 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 185 } 186 187 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 188 } 189 190 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 191 size_t size, unsigned int width) 192 { 193 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 194 } 195 196 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 197 { 198 const struct dso *dso = arg; 199 200 if (type != HIST_FILTER__DSO) 201 return -1; 202 203 return dso && (!he->ms.map || he->ms.map->dso != dso); 204 } 205 206 struct sort_entry sort_dso = { 207 .se_header = "Shared Object", 208 .se_cmp = sort__dso_cmp, 209 .se_snprintf = hist_entry__dso_snprintf, 210 .se_filter = hist_entry__dso_filter, 211 .se_width_idx = HISTC_DSO, 212 }; 213 214 /* --sort symbol */ 215 216 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 217 { 218 return (int64_t)(right_ip - left_ip); 219 } 220 221 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 222 { 223 if (!sym_l || !sym_r) 224 return cmp_null(sym_l, sym_r); 225 226 if (sym_l == sym_r) 227 return 0; 228 229 if (sym_l->inlined || sym_r->inlined) 230 return strcmp(sym_l->name, sym_r->name); 231 232 if (sym_l->start != sym_r->start) 233 return (int64_t)(sym_r->start - sym_l->start); 234 235 return (int64_t)(sym_r->end - sym_l->end); 236 } 237 238 static int64_t 239 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 240 { 241 int64_t ret; 242 243 if (!left->ms.sym && !right->ms.sym) 244 return _sort__addr_cmp(left->ip, right->ip); 245 246 /* 247 * comparing symbol address alone is not enough since it's a 248 * relative address within a dso. 
249 */ 250 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 251 ret = sort__dso_cmp(left, right); 252 if (ret != 0) 253 return ret; 254 } 255 256 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 257 } 258 259 static int64_t 260 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 261 { 262 if (!left->ms.sym || !right->ms.sym) 263 return cmp_null(left->ms.sym, right->ms.sym); 264 265 return strcmp(right->ms.sym->name, left->ms.sym->name); 266 } 267 268 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 269 u64 ip, char level, char *bf, size_t size, 270 unsigned int width) 271 { 272 size_t ret = 0; 273 274 if (verbose > 0) { 275 char o = map ? dso__symtab_origin(map->dso) : '!'; 276 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 277 BITS_PER_LONG / 4 + 2, ip, o); 278 } 279 280 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 281 if (sym && map) { 282 if (map->type == MAP__VARIABLE) { 283 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 284 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 285 ip - map->unmap_ip(map, sym->start)); 286 } else { 287 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 288 width - ret, 289 sym->name); 290 if (sym->inlined) 291 ret += repsep_snprintf(bf + ret, size - ret, 292 " (inlined)"); 293 } 294 } else { 295 size_t len = BITS_PER_LONG / 4; 296 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 297 len, ip); 298 } 299 300 return ret; 301 } 302 303 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 304 size_t size, unsigned int width) 305 { 306 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 307 he->level, bf, size, width); 308 } 309 310 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 311 { 312 const char *sym = arg; 313 314 if (type != HIST_FILTER__SYMBOL) 315 return -1; 316 317 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 318 } 319 320 struct sort_entry sort_sym = { 321 .se_header = "Symbol", 322 .se_cmp = sort__sym_cmp, 323 .se_sort = sort__sym_sort, 324 .se_snprintf = hist_entry__sym_snprintf, 325 .se_filter = hist_entry__sym_filter, 326 .se_width_idx = HISTC_SYMBOL, 327 }; 328 329 /* --sort srcline */ 330 331 char *hist_entry__get_srcline(struct hist_entry *he) 332 { 333 struct map *map = he->ms.map; 334 335 if (!map) 336 return SRCLINE_UNKNOWN; 337 338 return get_srcline(map->dso, map__rip_2objdump(map, he->ip), 339 he->ms.sym, true, true); 340 } 341 342 static int64_t 343 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 344 { 345 if (!left->srcline) 346 left->srcline = hist_entry__get_srcline(left); 347 if (!right->srcline) 348 right->srcline = hist_entry__get_srcline(right); 349 350 return strcmp(right->srcline, left->srcline); 351 } 352 353 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 354 size_t size, unsigned int width) 355 { 356 if (!he->srcline) 357 he->srcline = hist_entry__get_srcline(he); 358 359 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 360 } 361 362 struct sort_entry sort_srcline = { 363 .se_header = "Source:Line", 364 .se_cmp = sort__srcline_cmp, 365 .se_snprintf = hist_entry__srcline_snprintf, 366 .se_width_idx = HISTC_SRCLINE, 367 }; 368 369 /* --sort srcline_from */ 370 371 static int64_t 372 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 373 { 374 if (!left->branch_info->srcline_from) { 375 struct map *map = left->branch_info->from.map; 376 if (!map) 377 
left->branch_info->srcline_from = SRCLINE_UNKNOWN; 378 else 379 left->branch_info->srcline_from = get_srcline(map->dso, 380 map__rip_2objdump(map, 381 left->branch_info->from.al_addr), 382 left->branch_info->from.sym, 383 true, true); 384 } 385 if (!right->branch_info->srcline_from) { 386 struct map *map = right->branch_info->from.map; 387 if (!map) 388 right->branch_info->srcline_from = SRCLINE_UNKNOWN; 389 else 390 right->branch_info->srcline_from = get_srcline(map->dso, 391 map__rip_2objdump(map, 392 right->branch_info->from.al_addr), 393 right->branch_info->from.sym, 394 true, true); 395 } 396 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 397 } 398 399 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 400 size_t size, unsigned int width) 401 { 402 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 403 } 404 405 struct sort_entry sort_srcline_from = { 406 .se_header = "From Source:Line", 407 .se_cmp = sort__srcline_from_cmp, 408 .se_snprintf = hist_entry__srcline_from_snprintf, 409 .se_width_idx = HISTC_SRCLINE_FROM, 410 }; 411 412 /* --sort srcline_to */ 413 414 static int64_t 415 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 416 { 417 if (!left->branch_info->srcline_to) { 418 struct map *map = left->branch_info->to.map; 419 if (!map) 420 left->branch_info->srcline_to = SRCLINE_UNKNOWN; 421 else 422 left->branch_info->srcline_to = get_srcline(map->dso, 423 map__rip_2objdump(map, 424 left->branch_info->to.al_addr), 425 left->branch_info->from.sym, 426 true, true); 427 } 428 if (!right->branch_info->srcline_to) { 429 struct map *map = right->branch_info->to.map; 430 if (!map) 431 right->branch_info->srcline_to = SRCLINE_UNKNOWN; 432 else 433 right->branch_info->srcline_to = get_srcline(map->dso, 434 map__rip_2objdump(map, 435 right->branch_info->to.al_addr), 436 right->branch_info->to.sym, 437 true, true); 438 } 439 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 440 } 441 442 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 443 size_t size, unsigned int width) 444 { 445 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 446 } 447 448 struct sort_entry sort_srcline_to = { 449 .se_header = "To Source:Line", 450 .se_cmp = sort__srcline_to_cmp, 451 .se_snprintf = hist_entry__srcline_to_snprintf, 452 .se_width_idx = HISTC_SRCLINE_TO, 453 }; 454 455 /* --sort srcfile */ 456 457 static char no_srcfile[1]; 458 459 static char *hist_entry__get_srcfile(struct hist_entry *e) 460 { 461 char *sf, *p; 462 struct map *map = e->ms.map; 463 464 if (!map) 465 return no_srcfile; 466 467 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 468 e->ms.sym, false, true, true); 469 if (!strcmp(sf, SRCLINE_UNKNOWN)) 470 return no_srcfile; 471 p = strchr(sf, ':'); 472 if (p && *sf) { 473 *p = 0; 474 return sf; 475 } 476 free(sf); 477 return no_srcfile; 478 } 479 480 static int64_t 481 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 482 { 483 if (!left->srcfile) 484 left->srcfile = hist_entry__get_srcfile(left); 485 if (!right->srcfile) 486 right->srcfile = hist_entry__get_srcfile(right); 487 488 return strcmp(right->srcfile, left->srcfile); 489 } 490 491 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 492 size_t size, unsigned int width) 493 { 494 if (!he->srcfile) 495 he->srcfile = hist_entry__get_srcfile(he); 496 497 return 
repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 498 } 499 500 struct sort_entry sort_srcfile = { 501 .se_header = "Source File", 502 .se_cmp = sort__srcfile_cmp, 503 .se_snprintf = hist_entry__srcfile_snprintf, 504 .se_width_idx = HISTC_SRCFILE, 505 }; 506 507 /* --sort parent */ 508 509 static int64_t 510 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 511 { 512 struct symbol *sym_l = left->parent; 513 struct symbol *sym_r = right->parent; 514 515 if (!sym_l || !sym_r) 516 return cmp_null(sym_l, sym_r); 517 518 return strcmp(sym_r->name, sym_l->name); 519 } 520 521 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 522 size_t size, unsigned int width) 523 { 524 return repsep_snprintf(bf, size, "%-*.*s", width, width, 525 he->parent ? he->parent->name : "[other]"); 526 } 527 528 struct sort_entry sort_parent = { 529 .se_header = "Parent symbol", 530 .se_cmp = sort__parent_cmp, 531 .se_snprintf = hist_entry__parent_snprintf, 532 .se_width_idx = HISTC_PARENT, 533 }; 534 535 /* --sort cpu */ 536 537 static int64_t 538 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 539 { 540 return right->cpu - left->cpu; 541 } 542 543 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 544 size_t size, unsigned int width) 545 { 546 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 547 } 548 549 struct sort_entry sort_cpu = { 550 .se_header = "CPU", 551 .se_cmp = sort__cpu_cmp, 552 .se_snprintf = hist_entry__cpu_snprintf, 553 .se_width_idx = HISTC_CPU, 554 }; 555 556 /* --sort cgroup_id */ 557 558 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 559 { 560 return (int64_t)(right_dev - left_dev); 561 } 562 563 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 564 { 565 return (int64_t)(right_ino - left_ino); 566 } 567 568 static int64_t 569 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 570 { 571 int64_t ret; 572 573 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 574 if (ret != 0) 575 return ret; 576 577 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 578 left->cgroup_id.ino); 579 } 580 581 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 582 char *bf, size_t size, 583 unsigned int width __maybe_unused) 584 { 585 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 586 he->cgroup_id.ino); 587 } 588 589 struct sort_entry sort_cgroup_id = { 590 .se_header = "cgroup id (dev/inode)", 591 .se_cmp = sort__cgroup_id_cmp, 592 .se_snprintf = hist_entry__cgroup_id_snprintf, 593 .se_width_idx = HISTC_CGROUP_ID, 594 }; 595 596 /* --sort socket */ 597 598 static int64_t 599 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 600 { 601 return right->socket - left->socket; 602 } 603 604 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 605 size_t size, unsigned int width) 606 { 607 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 608 } 609 610 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 611 { 612 int sk = *(const int *)arg; 613 614 if (type != HIST_FILTER__SOCKET) 615 return -1; 616 617 return sk >= 0 && he->socket != sk; 618 } 619 620 struct sort_entry sort_socket = { 621 .se_header = "Socket", 622 .se_cmp = sort__socket_cmp, 623 .se_snprintf = hist_entry__socket_snprintf, 624 .se_filter = hist_entry__socket_filter, 625 .se_width_idx = HISTC_SOCKET, 626 }; 627 628 /* --sort trace */ 629 630 static char 
*get_trace_output(struct hist_entry *he) 631 { 632 struct trace_seq seq; 633 struct perf_evsel *evsel; 634 struct pevent_record rec = { 635 .data = he->raw_data, 636 .size = he->raw_size, 637 }; 638 639 evsel = hists_to_evsel(he->hists); 640 641 trace_seq_init(&seq); 642 if (symbol_conf.raw_trace) { 643 pevent_print_fields(&seq, he->raw_data, he->raw_size, 644 evsel->tp_format); 645 } else { 646 pevent_event_info(&seq, evsel->tp_format, &rec); 647 } 648 /* 649 * Trim the buffer, it starts at 4KB and we're not going to 650 * add anything more to this buffer. 651 */ 652 return realloc(seq.buffer, seq.len + 1); 653 } 654 655 static int64_t 656 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 657 { 658 struct perf_evsel *evsel; 659 660 evsel = hists_to_evsel(left->hists); 661 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 662 return 0; 663 664 if (left->trace_output == NULL) 665 left->trace_output = get_trace_output(left); 666 if (right->trace_output == NULL) 667 right->trace_output = get_trace_output(right); 668 669 return strcmp(right->trace_output, left->trace_output); 670 } 671 672 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 673 size_t size, unsigned int width) 674 { 675 struct perf_evsel *evsel; 676 677 evsel = hists_to_evsel(he->hists); 678 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 679 return scnprintf(bf, size, "%-.*s", width, "N/A"); 680 681 if (he->trace_output == NULL) 682 he->trace_output = get_trace_output(he); 683 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 684 } 685 686 struct sort_entry sort_trace = { 687 .se_header = "Trace output", 688 .se_cmp = sort__trace_cmp, 689 .se_snprintf = hist_entry__trace_snprintf, 690 .se_width_idx = HISTC_TRACE, 691 }; 692 693 /* sort keys for branch stacks */ 694 695 static int64_t 696 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 697 { 698 if (!left->branch_info || !right->branch_info) 699 return cmp_null(left->branch_info, right->branch_info); 700 701 return _sort__dso_cmp(left->branch_info->from.map, 702 right->branch_info->from.map); 703 } 704 705 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 706 size_t size, unsigned int width) 707 { 708 if (he->branch_info) 709 return _hist_entry__dso_snprintf(he->branch_info->from.map, 710 bf, size, width); 711 else 712 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 713 } 714 715 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 716 const void *arg) 717 { 718 const struct dso *dso = arg; 719 720 if (type != HIST_FILTER__DSO) 721 return -1; 722 723 return dso && (!he->branch_info || !he->branch_info->from.map || 724 he->branch_info->from.map->dso != dso); 725 } 726 727 static int64_t 728 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 729 { 730 if (!left->branch_info || !right->branch_info) 731 return cmp_null(left->branch_info, right->branch_info); 732 733 return _sort__dso_cmp(left->branch_info->to.map, 734 right->branch_info->to.map); 735 } 736 737 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 738 size_t size, unsigned int width) 739 { 740 if (he->branch_info) 741 return _hist_entry__dso_snprintf(he->branch_info->to.map, 742 bf, size, width); 743 else 744 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 745 } 746 747 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 748 const void *arg) 749 { 750 const struct dso *dso = arg; 751 752 if (type != HIST_FILTER__DSO) 
753 return -1; 754 755 return dso && (!he->branch_info || !he->branch_info->to.map || 756 he->branch_info->to.map->dso != dso); 757 } 758 759 static int64_t 760 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 761 { 762 struct addr_map_symbol *from_l = &left->branch_info->from; 763 struct addr_map_symbol *from_r = &right->branch_info->from; 764 765 if (!left->branch_info || !right->branch_info) 766 return cmp_null(left->branch_info, right->branch_info); 767 768 from_l = &left->branch_info->from; 769 from_r = &right->branch_info->from; 770 771 if (!from_l->sym && !from_r->sym) 772 return _sort__addr_cmp(from_l->addr, from_r->addr); 773 774 return _sort__sym_cmp(from_l->sym, from_r->sym); 775 } 776 777 static int64_t 778 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 779 { 780 struct addr_map_symbol *to_l, *to_r; 781 782 if (!left->branch_info || !right->branch_info) 783 return cmp_null(left->branch_info, right->branch_info); 784 785 to_l = &left->branch_info->to; 786 to_r = &right->branch_info->to; 787 788 if (!to_l->sym && !to_r->sym) 789 return _sort__addr_cmp(to_l->addr, to_r->addr); 790 791 return _sort__sym_cmp(to_l->sym, to_r->sym); 792 } 793 794 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 795 size_t size, unsigned int width) 796 { 797 if (he->branch_info) { 798 struct addr_map_symbol *from = &he->branch_info->from; 799 800 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 801 he->level, bf, size, width); 802 } 803 804 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 805 } 806 807 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 808 size_t size, unsigned int width) 809 { 810 if (he->branch_info) { 811 struct addr_map_symbol *to = &he->branch_info->to; 812 813 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 814 he->level, bf, size, width); 815 } 816 817 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 818 } 819 820 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 821 const void *arg) 822 { 823 const char *sym = arg; 824 825 if (type != HIST_FILTER__SYMBOL) 826 return -1; 827 828 return sym && !(he->branch_info && he->branch_info->from.sym && 829 strstr(he->branch_info->from.sym->name, sym)); 830 } 831 832 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 833 const void *arg) 834 { 835 const char *sym = arg; 836 837 if (type != HIST_FILTER__SYMBOL) 838 return -1; 839 840 return sym && !(he->branch_info && he->branch_info->to.sym && 841 strstr(he->branch_info->to.sym->name, sym)); 842 } 843 844 struct sort_entry sort_dso_from = { 845 .se_header = "Source Shared Object", 846 .se_cmp = sort__dso_from_cmp, 847 .se_snprintf = hist_entry__dso_from_snprintf, 848 .se_filter = hist_entry__dso_from_filter, 849 .se_width_idx = HISTC_DSO_FROM, 850 }; 851 852 struct sort_entry sort_dso_to = { 853 .se_header = "Target Shared Object", 854 .se_cmp = sort__dso_to_cmp, 855 .se_snprintf = hist_entry__dso_to_snprintf, 856 .se_filter = hist_entry__dso_to_filter, 857 .se_width_idx = HISTC_DSO_TO, 858 }; 859 860 struct sort_entry sort_sym_from = { 861 .se_header = "Source Symbol", 862 .se_cmp = sort__sym_from_cmp, 863 .se_snprintf = hist_entry__sym_from_snprintf, 864 .se_filter = hist_entry__sym_from_filter, 865 .se_width_idx = HISTC_SYMBOL_FROM, 866 }; 867 868 struct sort_entry sort_sym_to = { 869 .se_header = "Target Symbol", 870 .se_cmp = sort__sym_to_cmp, 871 .se_snprintf = hist_entry__sym_to_snprintf, 872 
.se_filter = hist_entry__sym_to_filter, 873 .se_width_idx = HISTC_SYMBOL_TO, 874 }; 875 876 static int64_t 877 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 878 { 879 unsigned char mp, p; 880 881 if (!left->branch_info || !right->branch_info) 882 return cmp_null(left->branch_info, right->branch_info); 883 884 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 885 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 886 return mp || p; 887 } 888 889 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 890 size_t size, unsigned int width){ 891 static const char *out = "N/A"; 892 893 if (he->branch_info) { 894 if (he->branch_info->flags.predicted) 895 out = "N"; 896 else if (he->branch_info->flags.mispred) 897 out = "Y"; 898 } 899 900 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 901 } 902 903 static int64_t 904 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 905 { 906 if (!left->branch_info || !right->branch_info) 907 return cmp_null(left->branch_info, right->branch_info); 908 909 return left->branch_info->flags.cycles - 910 right->branch_info->flags.cycles; 911 } 912 913 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 914 size_t size, unsigned int width) 915 { 916 if (!he->branch_info) 917 return scnprintf(bf, size, "%-.*s", width, "N/A"); 918 if (he->branch_info->flags.cycles == 0) 919 return repsep_snprintf(bf, size, "%-*s", width, "-"); 920 return repsep_snprintf(bf, size, "%-*hd", width, 921 he->branch_info->flags.cycles); 922 } 923 924 struct sort_entry sort_cycles = { 925 .se_header = "Basic Block Cycles", 926 .se_cmp = sort__cycles_cmp, 927 .se_snprintf = hist_entry__cycles_snprintf, 928 .se_width_idx = HISTC_CYCLES, 929 }; 930 931 /* --sort daddr_sym */ 932 int64_t 933 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 934 { 935 uint64_t l = 0, r = 0; 936 937 if (left->mem_info) 938 l = left->mem_info->daddr.addr; 939 if (right->mem_info) 940 r = right->mem_info->daddr.addr; 941 942 return (int64_t)(r - l); 943 } 944 945 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 946 size_t size, unsigned int width) 947 { 948 uint64_t addr = 0; 949 struct map *map = NULL; 950 struct symbol *sym = NULL; 951 952 if (he->mem_info) { 953 addr = he->mem_info->daddr.addr; 954 map = he->mem_info->daddr.map; 955 sym = he->mem_info->daddr.sym; 956 } 957 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 958 width); 959 } 960 961 int64_t 962 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 963 { 964 uint64_t l = 0, r = 0; 965 966 if (left->mem_info) 967 l = left->mem_info->iaddr.addr; 968 if (right->mem_info) 969 r = right->mem_info->iaddr.addr; 970 971 return (int64_t)(r - l); 972 } 973 974 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 975 size_t size, unsigned int width) 976 { 977 uint64_t addr = 0; 978 struct map *map = NULL; 979 struct symbol *sym = NULL; 980 981 if (he->mem_info) { 982 addr = he->mem_info->iaddr.addr; 983 map = he->mem_info->iaddr.map; 984 sym = he->mem_info->iaddr.sym; 985 } 986 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 987 width); 988 } 989 990 static int64_t 991 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 992 { 993 struct map *map_l = NULL; 994 struct map *map_r = NULL; 995 996 if (left->mem_info) 997 map_l = left->mem_info->daddr.map; 998 if (right->mem_info) 999 
map_r = right->mem_info->daddr.map; 1000 1001 return _sort__dso_cmp(map_l, map_r); 1002 } 1003 1004 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 1005 size_t size, unsigned int width) 1006 { 1007 struct map *map = NULL; 1008 1009 if (he->mem_info) 1010 map = he->mem_info->daddr.map; 1011 1012 return _hist_entry__dso_snprintf(map, bf, size, width); 1013 } 1014 1015 static int64_t 1016 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1017 { 1018 union perf_mem_data_src data_src_l; 1019 union perf_mem_data_src data_src_r; 1020 1021 if (left->mem_info) 1022 data_src_l = left->mem_info->data_src; 1023 else 1024 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1025 1026 if (right->mem_info) 1027 data_src_r = right->mem_info->data_src; 1028 else 1029 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1030 1031 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1032 } 1033 1034 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1035 size_t size, unsigned int width) 1036 { 1037 char out[10]; 1038 1039 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1040 return repsep_snprintf(bf, size, "%.*s", width, out); 1041 } 1042 1043 static int64_t 1044 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1045 { 1046 union perf_mem_data_src data_src_l; 1047 union perf_mem_data_src data_src_r; 1048 1049 if (left->mem_info) 1050 data_src_l = left->mem_info->data_src; 1051 else 1052 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1053 1054 if (right->mem_info) 1055 data_src_r = right->mem_info->data_src; 1056 else 1057 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1058 1059 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1060 } 1061 1062 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1063 size_t size, unsigned int width) 1064 { 1065 char out[64]; 1066 1067 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1068 return repsep_snprintf(bf, size, "%-*s", width, out); 1069 } 1070 1071 static int64_t 1072 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1073 { 1074 union perf_mem_data_src data_src_l; 1075 union perf_mem_data_src data_src_r; 1076 1077 if (left->mem_info) 1078 data_src_l = left->mem_info->data_src; 1079 else 1080 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1081 1082 if (right->mem_info) 1083 data_src_r = right->mem_info->data_src; 1084 else 1085 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1086 1087 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1088 } 1089 1090 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1091 size_t size, unsigned int width) 1092 { 1093 char out[64]; 1094 1095 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1096 return repsep_snprintf(bf, size, "%-*s", width, out); 1097 } 1098 1099 static int64_t 1100 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1101 { 1102 union perf_mem_data_src data_src_l; 1103 union perf_mem_data_src data_src_r; 1104 1105 if (left->mem_info) 1106 data_src_l = left->mem_info->data_src; 1107 else 1108 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1109 1110 if (right->mem_info) 1111 data_src_r = right->mem_info->data_src; 1112 else 1113 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1114 1115 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1116 } 1117 1118 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1119 size_t size, unsigned int width) 1120 { 1121 char out[64]; 1122 1123 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1124 return repsep_snprintf(bf, 
size, "%-*s", width, out); 1125 } 1126 1127 int64_t 1128 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1129 { 1130 u64 l, r; 1131 struct map *l_map, *r_map; 1132 1133 if (!left->mem_info) return -1; 1134 if (!right->mem_info) return 1; 1135 1136 /* group event types together */ 1137 if (left->cpumode > right->cpumode) return -1; 1138 if (left->cpumode < right->cpumode) return 1; 1139 1140 l_map = left->mem_info->daddr.map; 1141 r_map = right->mem_info->daddr.map; 1142 1143 /* if both are NULL, jump to sort on al_addr instead */ 1144 if (!l_map && !r_map) 1145 goto addr; 1146 1147 if (!l_map) return -1; 1148 if (!r_map) return 1; 1149 1150 if (l_map->maj > r_map->maj) return -1; 1151 if (l_map->maj < r_map->maj) return 1; 1152 1153 if (l_map->min > r_map->min) return -1; 1154 if (l_map->min < r_map->min) return 1; 1155 1156 if (l_map->ino > r_map->ino) return -1; 1157 if (l_map->ino < r_map->ino) return 1; 1158 1159 if (l_map->ino_generation > r_map->ino_generation) return -1; 1160 if (l_map->ino_generation < r_map->ino_generation) return 1; 1161 1162 /* 1163 * Addresses with no major/minor numbers are assumed to be 1164 * anonymous in userspace. Sort those on pid then address. 1165 * 1166 * The kernel and non-zero major/minor mapped areas are 1167 * assumed to be unity mapped. Sort those on address. 1168 */ 1169 1170 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) && 1171 (!(l_map->flags & MAP_SHARED)) && 1172 !l_map->maj && !l_map->min && !l_map->ino && 1173 !l_map->ino_generation) { 1174 /* userspace anonymous */ 1175 1176 if (left->thread->pid_ > right->thread->pid_) return -1; 1177 if (left->thread->pid_ < right->thread->pid_) return 1; 1178 } 1179 1180 addr: 1181 /* al_addr does all the right addr - start + offset calculations */ 1182 l = cl_address(left->mem_info->daddr.al_addr); 1183 r = cl_address(right->mem_info->daddr.al_addr); 1184 1185 if (l > r) return -1; 1186 if (l < r) return 1; 1187 1188 return 0; 1189 } 1190 1191 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf, 1192 size_t size, unsigned int width) 1193 { 1194 1195 uint64_t addr = 0; 1196 struct map *map = NULL; 1197 struct symbol *sym = NULL; 1198 char level = he->level; 1199 1200 if (he->mem_info) { 1201 addr = cl_address(he->mem_info->daddr.al_addr); 1202 map = he->mem_info->daddr.map; 1203 sym = he->mem_info->daddr.sym; 1204 1205 /* print [s] for shared data mmaps */ 1206 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) && 1207 map && (map->type == MAP__VARIABLE) && 1208 (map->flags & MAP_SHARED) && 1209 (map->maj || map->min || map->ino || 1210 map->ino_generation)) 1211 level = 's'; 1212 else if (!map) 1213 level = 'X'; 1214 } 1215 return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, 1216 width); 1217 } 1218 1219 struct sort_entry sort_mispredict = { 1220 .se_header = "Branch Mispredicted", 1221 .se_cmp = sort__mispredict_cmp, 1222 .se_snprintf = hist_entry__mispredict_snprintf, 1223 .se_width_idx = HISTC_MISPREDICT, 1224 }; 1225 1226 static u64 he_weight(struct hist_entry *he) 1227 { 1228 return he->stat.nr_events ? 
he->stat.weight / he->stat.nr_events : 0; 1229 } 1230 1231 static int64_t 1232 sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1233 { 1234 return he_weight(left) - he_weight(right); 1235 } 1236 1237 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf, 1238 size_t size, unsigned int width) 1239 { 1240 return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he)); 1241 } 1242 1243 struct sort_entry sort_local_weight = { 1244 .se_header = "Local Weight", 1245 .se_cmp = sort__local_weight_cmp, 1246 .se_snprintf = hist_entry__local_weight_snprintf, 1247 .se_width_idx = HISTC_LOCAL_WEIGHT, 1248 }; 1249 1250 static int64_t 1251 sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right) 1252 { 1253 return left->stat.weight - right->stat.weight; 1254 } 1255 1256 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf, 1257 size_t size, unsigned int width) 1258 { 1259 return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight); 1260 } 1261 1262 struct sort_entry sort_global_weight = { 1263 .se_header = "Weight", 1264 .se_cmp = sort__global_weight_cmp, 1265 .se_snprintf = hist_entry__global_weight_snprintf, 1266 .se_width_idx = HISTC_GLOBAL_WEIGHT, 1267 }; 1268 1269 struct sort_entry sort_mem_daddr_sym = { 1270 .se_header = "Data Symbol", 1271 .se_cmp = sort__daddr_cmp, 1272 .se_snprintf = hist_entry__daddr_snprintf, 1273 .se_width_idx = HISTC_MEM_DADDR_SYMBOL, 1274 }; 1275 1276 struct sort_entry sort_mem_iaddr_sym = { 1277 .se_header = "Code Symbol", 1278 .se_cmp = sort__iaddr_cmp, 1279 .se_snprintf = hist_entry__iaddr_snprintf, 1280 .se_width_idx = HISTC_MEM_IADDR_SYMBOL, 1281 }; 1282 1283 struct sort_entry sort_mem_daddr_dso = { 1284 .se_header = "Data Object", 1285 .se_cmp = sort__dso_daddr_cmp, 1286 .se_snprintf = hist_entry__dso_daddr_snprintf, 1287 .se_width_idx = HISTC_MEM_DADDR_DSO, 1288 }; 1289 1290 struct sort_entry sort_mem_locked = { 1291 .se_header = "Locked", 1292 .se_cmp = sort__locked_cmp, 1293 .se_snprintf = hist_entry__locked_snprintf, 1294 .se_width_idx = HISTC_MEM_LOCKED, 1295 }; 1296 1297 struct sort_entry sort_mem_tlb = { 1298 .se_header = "TLB access", 1299 .se_cmp = sort__tlb_cmp, 1300 .se_snprintf = hist_entry__tlb_snprintf, 1301 .se_width_idx = HISTC_MEM_TLB, 1302 }; 1303 1304 struct sort_entry sort_mem_lvl = { 1305 .se_header = "Memory access", 1306 .se_cmp = sort__lvl_cmp, 1307 .se_snprintf = hist_entry__lvl_snprintf, 1308 .se_width_idx = HISTC_MEM_LVL, 1309 }; 1310 1311 struct sort_entry sort_mem_snoop = { 1312 .se_header = "Snoop", 1313 .se_cmp = sort__snoop_cmp, 1314 .se_snprintf = hist_entry__snoop_snprintf, 1315 .se_width_idx = HISTC_MEM_SNOOP, 1316 }; 1317 1318 struct sort_entry sort_mem_dcacheline = { 1319 .se_header = "Data Cacheline", 1320 .se_cmp = sort__dcacheline_cmp, 1321 .se_snprintf = hist_entry__dcacheline_snprintf, 1322 .se_width_idx = HISTC_MEM_DCACHELINE, 1323 }; 1324 1325 static int64_t 1326 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1327 { 1328 uint64_t l = 0, r = 0; 1329 1330 if (left->mem_info) 1331 l = left->mem_info->daddr.phys_addr; 1332 if (right->mem_info) 1333 r = right->mem_info->daddr.phys_addr; 1334 1335 return (int64_t)(r - l); 1336 } 1337 1338 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1339 size_t size, unsigned int width) 1340 { 1341 uint64_t addr = 0; 1342 size_t ret = 0; 1343 size_t len = BITS_PER_LONG / 4; 1344 1345 addr = he->mem_info->daddr.phys_addr; 1346 1347 ret += 
repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1348 1349 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1350 1351 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1352 1353 if (ret > width) 1354 bf[width] = '\0'; 1355 1356 return width; 1357 } 1358 1359 struct sort_entry sort_mem_phys_daddr = { 1360 .se_header = "Data Physical Address", 1361 .se_cmp = sort__phys_daddr_cmp, 1362 .se_snprintf = hist_entry__phys_daddr_snprintf, 1363 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1364 }; 1365 1366 static int64_t 1367 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1368 { 1369 if (!left->branch_info || !right->branch_info) 1370 return cmp_null(left->branch_info, right->branch_info); 1371 1372 return left->branch_info->flags.abort != 1373 right->branch_info->flags.abort; 1374 } 1375 1376 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1377 size_t size, unsigned int width) 1378 { 1379 static const char *out = "N/A"; 1380 1381 if (he->branch_info) { 1382 if (he->branch_info->flags.abort) 1383 out = "A"; 1384 else 1385 out = "."; 1386 } 1387 1388 return repsep_snprintf(bf, size, "%-*s", width, out); 1389 } 1390 1391 struct sort_entry sort_abort = { 1392 .se_header = "Transaction abort", 1393 .se_cmp = sort__abort_cmp, 1394 .se_snprintf = hist_entry__abort_snprintf, 1395 .se_width_idx = HISTC_ABORT, 1396 }; 1397 1398 static int64_t 1399 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1400 { 1401 if (!left->branch_info || !right->branch_info) 1402 return cmp_null(left->branch_info, right->branch_info); 1403 1404 return left->branch_info->flags.in_tx != 1405 right->branch_info->flags.in_tx; 1406 } 1407 1408 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1409 size_t size, unsigned int width) 1410 { 1411 static const char *out = "N/A"; 1412 1413 if (he->branch_info) { 1414 if (he->branch_info->flags.in_tx) 1415 out = "T"; 1416 else 1417 out = "."; 1418 } 1419 1420 return repsep_snprintf(bf, size, "%-*s", width, out); 1421 } 1422 1423 struct sort_entry sort_in_tx = { 1424 .se_header = "Branch in transaction", 1425 .se_cmp = sort__in_tx_cmp, 1426 .se_snprintf = hist_entry__in_tx_snprintf, 1427 .se_width_idx = HISTC_IN_TX, 1428 }; 1429 1430 static int64_t 1431 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1432 { 1433 return left->transaction - right->transaction; 1434 } 1435 1436 static inline char *add_str(char *p, const char *str) 1437 { 1438 strcpy(p, str); 1439 return p + strlen(str); 1440 } 1441 1442 static struct txbit { 1443 unsigned flag; 1444 const char *name; 1445 int skip_for_len; 1446 } txbits[] = { 1447 { PERF_TXN_ELISION, "EL ", 0 }, 1448 { PERF_TXN_TRANSACTION, "TX ", 1 }, 1449 { PERF_TXN_SYNC, "SYNC ", 1 }, 1450 { PERF_TXN_ASYNC, "ASYNC ", 0 }, 1451 { PERF_TXN_RETRY, "RETRY ", 0 }, 1452 { PERF_TXN_CONFLICT, "CON ", 0 }, 1453 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 }, 1454 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 }, 1455 { 0, NULL, 0 } 1456 }; 1457 1458 int hist_entry__transaction_len(void) 1459 { 1460 int i; 1461 int len = 0; 1462 1463 for (i = 0; txbits[i].name; i++) { 1464 if (!txbits[i].skip_for_len) 1465 len += strlen(txbits[i].name); 1466 } 1467 len += 4; /* :XX<space> */ 1468 return len; 1469 } 1470 1471 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf, 1472 size_t size, unsigned int width) 1473 { 1474 u64 t = he->transaction; 1475 char buf[128]; 1476 char *p = buf; 1477 int i; 1478 1479 buf[0] = 0; 
1480 for (i = 0; txbits[i].name; i++) 1481 if (txbits[i].flag & t) 1482 p = add_str(p, txbits[i].name); 1483 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1484 p = add_str(p, "NEITHER "); 1485 if (t & PERF_TXN_ABORT_MASK) { 1486 sprintf(p, ":%" PRIx64, 1487 (t & PERF_TXN_ABORT_MASK) >> 1488 PERF_TXN_ABORT_SHIFT); 1489 p += strlen(p); 1490 } 1491 1492 return repsep_snprintf(bf, size, "%-*s", width, buf); 1493 } 1494 1495 struct sort_entry sort_transaction = { 1496 .se_header = "Transaction ", 1497 .se_cmp = sort__transaction_cmp, 1498 .se_snprintf = hist_entry__transaction_snprintf, 1499 .se_width_idx = HISTC_TRANSACTION, 1500 }; 1501 1502 /* --sort symbol_size */ 1503 1504 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r) 1505 { 1506 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0; 1507 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0; 1508 1509 return size_l < size_r ? -1 : 1510 size_l == size_r ? 0 : 1; 1511 } 1512 1513 static int64_t 1514 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right) 1515 { 1516 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym); 1517 } 1518 1519 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf, 1520 size_t bf_size, unsigned int width) 1521 { 1522 if (sym) 1523 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym)); 1524 1525 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown"); 1526 } 1527 1528 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf, 1529 size_t size, unsigned int width) 1530 { 1531 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width); 1532 } 1533 1534 struct sort_entry sort_sym_size = { 1535 .se_header = "Symbol size", 1536 .se_cmp = sort__sym_size_cmp, 1537 .se_snprintf = hist_entry__sym_size_snprintf, 1538 .se_width_idx = HISTC_SYM_SIZE, 1539 }; 1540 1541 1542 struct sort_dimension { 1543 const char *name; 1544 struct sort_entry *entry; 1545 int taken; 1546 }; 1547 1548 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1549 1550 static struct sort_dimension common_sort_dimensions[] = { 1551 DIM(SORT_PID, "pid", sort_thread), 1552 DIM(SORT_COMM, "comm", sort_comm), 1553 DIM(SORT_DSO, "dso", sort_dso), 1554 DIM(SORT_SYM, "symbol", sort_sym), 1555 DIM(SORT_PARENT, "parent", sort_parent), 1556 DIM(SORT_CPU, "cpu", sort_cpu), 1557 DIM(SORT_SOCKET, "socket", sort_socket), 1558 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1559 DIM(SORT_SRCFILE, "srcfile", sort_srcfile), 1560 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1561 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1562 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1563 DIM(SORT_TRACE, "trace", sort_trace), 1564 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), 1565 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1566 }; 1567 1568 #undef DIM 1569 1570 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1571 1572 static struct sort_dimension bstack_sort_dimensions[] = { 1573 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1574 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1575 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1576 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1577 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1578 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1579 DIM(SORT_ABORT, "abort", sort_abort), 1580 DIM(SORT_CYCLES, "cycles", sort_cycles), 1581 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1582 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 
1583 }; 1584 1585 #undef DIM 1586 1587 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1588 1589 static struct sort_dimension memory_sort_dimensions[] = { 1590 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1591 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym), 1592 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1593 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1594 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1595 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1596 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1597 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1598 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr), 1599 }; 1600 1601 #undef DIM 1602 1603 struct hpp_dimension { 1604 const char *name; 1605 struct perf_hpp_fmt *fmt; 1606 int taken; 1607 }; 1608 1609 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1610 1611 static struct hpp_dimension hpp_sort_dimensions[] = { 1612 DIM(PERF_HPP__OVERHEAD, "overhead"), 1613 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1614 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1615 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1616 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1617 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1618 DIM(PERF_HPP__SAMPLES, "sample"), 1619 DIM(PERF_HPP__PERIOD, "period"), 1620 }; 1621 1622 #undef DIM 1623 1624 struct hpp_sort_entry { 1625 struct perf_hpp_fmt hpp; 1626 struct sort_entry *se; 1627 }; 1628 1629 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1630 { 1631 struct hpp_sort_entry *hse; 1632 1633 if (!perf_hpp__is_sort_entry(fmt)) 1634 return; 1635 1636 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1637 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1638 } 1639 1640 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1641 struct hists *hists, int line __maybe_unused, 1642 int *span __maybe_unused) 1643 { 1644 struct hpp_sort_entry *hse; 1645 size_t len = fmt->user_len; 1646 1647 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1648 1649 if (!len) 1650 len = hists__col_len(hists, hse->se->se_width_idx); 1651 1652 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1653 } 1654 1655 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1656 struct perf_hpp *hpp __maybe_unused, 1657 struct hists *hists) 1658 { 1659 struct hpp_sort_entry *hse; 1660 size_t len = fmt->user_len; 1661 1662 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1663 1664 if (!len) 1665 len = hists__col_len(hists, hse->se->se_width_idx); 1666 1667 return len; 1668 } 1669 1670 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1671 struct hist_entry *he) 1672 { 1673 struct hpp_sort_entry *hse; 1674 size_t len = fmt->user_len; 1675 1676 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1677 1678 if (!len) 1679 len = hists__col_len(he->hists, hse->se->se_width_idx); 1680 1681 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1682 } 1683 1684 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1685 struct hist_entry *a, struct hist_entry *b) 1686 { 1687 struct hpp_sort_entry *hse; 1688 1689 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1690 return hse->se->se_cmp(a, b); 1691 } 1692 1693 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1694 struct hist_entry *a, struct hist_entry *b) 1695 { 1696 struct hpp_sort_entry *hse; 1697 int64_t 
(*collapse_fn)(struct hist_entry *, struct hist_entry *); 1698 1699 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1700 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1701 return collapse_fn(a, b); 1702 } 1703 1704 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1705 struct hist_entry *a, struct hist_entry *b) 1706 { 1707 struct hpp_sort_entry *hse; 1708 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1709 1710 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1711 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1712 return sort_fn(a, b); 1713 } 1714 1715 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1716 { 1717 return format->header == __sort__hpp_header; 1718 } 1719 1720 #define MK_SORT_ENTRY_CHK(key) \ 1721 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1722 { \ 1723 struct hpp_sort_entry *hse; \ 1724 \ 1725 if (!perf_hpp__is_sort_entry(fmt)) \ 1726 return false; \ 1727 \ 1728 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1729 return hse->se == &sort_ ## key ; \ 1730 } 1731 1732 MK_SORT_ENTRY_CHK(trace) 1733 MK_SORT_ENTRY_CHK(srcline) 1734 MK_SORT_ENTRY_CHK(srcfile) 1735 MK_SORT_ENTRY_CHK(thread) 1736 MK_SORT_ENTRY_CHK(comm) 1737 MK_SORT_ENTRY_CHK(dso) 1738 MK_SORT_ENTRY_CHK(sym) 1739 1740 1741 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1742 { 1743 struct hpp_sort_entry *hse_a; 1744 struct hpp_sort_entry *hse_b; 1745 1746 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1747 return false; 1748 1749 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1750 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1751 1752 return hse_a->se == hse_b->se; 1753 } 1754 1755 static void hse_free(struct perf_hpp_fmt *fmt) 1756 { 1757 struct hpp_sort_entry *hse; 1758 1759 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1760 free(hse); 1761 } 1762 1763 static struct hpp_sort_entry * 1764 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1765 { 1766 struct hpp_sort_entry *hse; 1767 1768 hse = malloc(sizeof(*hse)); 1769 if (hse == NULL) { 1770 pr_err("Memory allocation failed\n"); 1771 return NULL; 1772 } 1773 1774 hse->se = sd->entry; 1775 hse->hpp.name = sd->entry->se_header; 1776 hse->hpp.header = __sort__hpp_header; 1777 hse->hpp.width = __sort__hpp_width; 1778 hse->hpp.entry = __sort__hpp_entry; 1779 hse->hpp.color = NULL; 1780 1781 hse->hpp.cmp = __sort__hpp_cmp; 1782 hse->hpp.collapse = __sort__hpp_collapse; 1783 hse->hpp.sort = __sort__hpp_sort; 1784 hse->hpp.equal = __sort__hpp_equal; 1785 hse->hpp.free = hse_free; 1786 1787 INIT_LIST_HEAD(&hse->hpp.list); 1788 INIT_LIST_HEAD(&hse->hpp.sort_list); 1789 hse->hpp.elide = false; 1790 hse->hpp.len = 0; 1791 hse->hpp.user_len = 0; 1792 hse->hpp.level = level; 1793 1794 return hse; 1795 } 1796 1797 static void hpp_free(struct perf_hpp_fmt *fmt) 1798 { 1799 free(fmt); 1800 } 1801 1802 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1803 int level) 1804 { 1805 struct perf_hpp_fmt *fmt; 1806 1807 fmt = memdup(hd->fmt, sizeof(*fmt)); 1808 if (fmt) { 1809 INIT_LIST_HEAD(&fmt->list); 1810 INIT_LIST_HEAD(&fmt->sort_list); 1811 fmt->free = hpp_free; 1812 fmt->level = level; 1813 } 1814 1815 return fmt; 1816 } 1817 1818 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1819 { 1820 struct perf_hpp_fmt *fmt; 1821 struct hpp_sort_entry *hse; 1822 int ret = -1; 1823 int r; 1824 1825 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1826 if 
(!perf_hpp__is_sort_entry(fmt)) 1827 continue; 1828 1829 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1830 if (hse->se->se_filter == NULL) 1831 continue; 1832 1833 /* 1834 * hist entry is filtered if any of sort key in the hpp list 1835 * is applied. But it should skip non-matched filter types. 1836 */ 1837 r = hse->se->se_filter(he, type, arg); 1838 if (r >= 0) { 1839 if (ret < 0) 1840 ret = 0; 1841 ret |= r; 1842 } 1843 } 1844 1845 return ret; 1846 } 1847 1848 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, 1849 struct perf_hpp_list *list, 1850 int level) 1851 { 1852 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level); 1853 1854 if (hse == NULL) 1855 return -1; 1856 1857 perf_hpp_list__register_sort_field(list, &hse->hpp); 1858 return 0; 1859 } 1860 1861 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd, 1862 struct perf_hpp_list *list) 1863 { 1864 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0); 1865 1866 if (hse == NULL) 1867 return -1; 1868 1869 perf_hpp_list__column_register(list, &hse->hpp); 1870 return 0; 1871 } 1872 1873 struct hpp_dynamic_entry { 1874 struct perf_hpp_fmt hpp; 1875 struct perf_evsel *evsel; 1876 struct format_field *field; 1877 unsigned dynamic_len; 1878 bool raw_trace; 1879 }; 1880 1881 static int hde_width(struct hpp_dynamic_entry *hde) 1882 { 1883 if (!hde->hpp.len) { 1884 int len = hde->dynamic_len; 1885 int namelen = strlen(hde->field->name); 1886 int fieldlen = hde->field->size; 1887 1888 if (namelen > len) 1889 len = namelen; 1890 1891 if (!(hde->field->flags & FIELD_IS_STRING)) { 1892 /* length for print hex numbers */ 1893 fieldlen = hde->field->size * 2 + 2; 1894 } 1895 if (fieldlen > len) 1896 len = fieldlen; 1897 1898 hde->hpp.len = len; 1899 } 1900 return hde->hpp.len; 1901 } 1902 1903 static void update_dynamic_len(struct hpp_dynamic_entry *hde, 1904 struct hist_entry *he) 1905 { 1906 char *str, *pos; 1907 struct format_field *field = hde->field; 1908 size_t namelen; 1909 bool last = false; 1910 1911 if (hde->raw_trace) 1912 return; 1913 1914 /* parse pretty print result and update max length */ 1915 if (!he->trace_output) 1916 he->trace_output = get_trace_output(he); 1917 1918 namelen = strlen(field->name); 1919 str = he->trace_output; 1920 1921 while (str) { 1922 pos = strchr(str, ' '); 1923 if (pos == NULL) { 1924 last = true; 1925 pos = str + strlen(str); 1926 } 1927 1928 if (!strncmp(str, field->name, namelen)) { 1929 size_t len; 1930 1931 str += namelen + 1; 1932 len = pos - str; 1933 1934 if (len > hde->dynamic_len) 1935 hde->dynamic_len = len; 1936 break; 1937 } 1938 1939 if (last) 1940 str = NULL; 1941 else 1942 str = pos + 1; 1943 } 1944 } 1945 1946 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1947 struct hists *hists __maybe_unused, 1948 int line __maybe_unused, 1949 int *span __maybe_unused) 1950 { 1951 struct hpp_dynamic_entry *hde; 1952 size_t len = fmt->user_len; 1953 1954 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1955 1956 if (!len) 1957 len = hde_width(hde); 1958 1959 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name); 1960 } 1961 1962 static int __sort__hde_width(struct perf_hpp_fmt *fmt, 1963 struct perf_hpp *hpp __maybe_unused, 1964 struct hists *hists __maybe_unused) 1965 { 1966 struct hpp_dynamic_entry *hde; 1967 size_t len = fmt->user_len; 1968 1969 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1970 1971 if (!len) 1972 len = hde_width(hde); 1973 1974 return len; 1975 } 
1976 1977 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists) 1978 { 1979 struct hpp_dynamic_entry *hde; 1980 1981 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1982 1983 return hists_to_evsel(hists) == hde->evsel; 1984 } 1985 1986 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1987 struct hist_entry *he) 1988 { 1989 struct hpp_dynamic_entry *hde; 1990 size_t len = fmt->user_len; 1991 char *str, *pos; 1992 struct format_field *field; 1993 size_t namelen; 1994 bool last = false; 1995 int ret; 1996 1997 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1998 1999 if (!len) 2000 len = hde_width(hde); 2001 2002 if (hde->raw_trace) 2003 goto raw_field; 2004 2005 if (!he->trace_output) 2006 he->trace_output = get_trace_output(he); 2007 2008 field = hde->field; 2009 namelen = strlen(field->name); 2010 str = he->trace_output; 2011 2012 while (str) { 2013 pos = strchr(str, ' '); 2014 if (pos == NULL) { 2015 last = true; 2016 pos = str + strlen(str); 2017 } 2018 2019 if (!strncmp(str, field->name, namelen)) { 2020 str += namelen + 1; 2021 str = strndup(str, pos - str); 2022 2023 if (str == NULL) 2024 return scnprintf(hpp->buf, hpp->size, 2025 "%*.*s", len, len, "ERROR"); 2026 break; 2027 } 2028 2029 if (last) 2030 str = NULL; 2031 else 2032 str = pos + 1; 2033 } 2034 2035 if (str == NULL) { 2036 struct trace_seq seq; 2037 raw_field: 2038 trace_seq_init(&seq); 2039 pevent_print_field(&seq, he->raw_data, hde->field); 2040 str = seq.buffer; 2041 } 2042 2043 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str); 2044 free(str); 2045 return ret; 2046 } 2047 2048 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, 2049 struct hist_entry *a, struct hist_entry *b) 2050 { 2051 struct hpp_dynamic_entry *hde; 2052 struct format_field *field; 2053 unsigned offset, size; 2054 2055 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2056 2057 if (b == NULL) { 2058 update_dynamic_len(hde, a); 2059 return 0; 2060 } 2061 2062 field = hde->field; 2063 if (field->flags & FIELD_IS_DYNAMIC) { 2064 unsigned long long dyn; 2065 2066 pevent_read_number_field(field, a->raw_data, &dyn); 2067 offset = dyn & 0xffff; 2068 size = (dyn >> 16) & 0xffff; 2069 2070 /* record max width for output */ 2071 if (size > hde->dynamic_len) 2072 hde->dynamic_len = size; 2073 } else { 2074 offset = field->offset; 2075 size = field->size; 2076 } 2077 2078 return memcmp(a->raw_data + offset, b->raw_data + offset, size); 2079 } 2080 2081 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 2082 { 2083 return fmt->cmp == __sort__hde_cmp; 2084 } 2085 2086 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2087 { 2088 struct hpp_dynamic_entry *hde_a; 2089 struct hpp_dynamic_entry *hde_b; 2090 2091 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2092 return false; 2093 2094 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2095 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2096 2097 return hde_a->field == hde_b->field; 2098 } 2099 2100 static void hde_free(struct perf_hpp_fmt *fmt) 2101 { 2102 struct hpp_dynamic_entry *hde; 2103 2104 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2105 free(hde); 2106 } 2107 2108 static struct hpp_dynamic_entry * 2109 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field, 2110 int level) 2111 { 2112 struct hpp_dynamic_entry *hde; 2113 2114 hde = malloc(sizeof(*hde)); 2115 if (hde == NULL) { 2116 pr_debug("Memory allocation 
failed\n"); 2117 return NULL; 2118 } 2119 2120 hde->evsel = evsel; 2121 hde->field = field; 2122 hde->dynamic_len = 0; 2123 2124 hde->hpp.name = field->name; 2125 hde->hpp.header = __sort__hde_header; 2126 hde->hpp.width = __sort__hde_width; 2127 hde->hpp.entry = __sort__hde_entry; 2128 hde->hpp.color = NULL; 2129 2130 hde->hpp.cmp = __sort__hde_cmp; 2131 hde->hpp.collapse = __sort__hde_cmp; 2132 hde->hpp.sort = __sort__hde_cmp; 2133 hde->hpp.equal = __sort__hde_equal; 2134 hde->hpp.free = hde_free; 2135 2136 INIT_LIST_HEAD(&hde->hpp.list); 2137 INIT_LIST_HEAD(&hde->hpp.sort_list); 2138 hde->hpp.elide = false; 2139 hde->hpp.len = 0; 2140 hde->hpp.user_len = 0; 2141 hde->hpp.level = level; 2142 2143 return hde; 2144 } 2145 2146 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2147 { 2148 struct perf_hpp_fmt *new_fmt = NULL; 2149 2150 if (perf_hpp__is_sort_entry(fmt)) { 2151 struct hpp_sort_entry *hse, *new_hse; 2152 2153 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2154 new_hse = memdup(hse, sizeof(*hse)); 2155 if (new_hse) 2156 new_fmt = &new_hse->hpp; 2157 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2158 struct hpp_dynamic_entry *hde, *new_hde; 2159 2160 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2161 new_hde = memdup(hde, sizeof(*hde)); 2162 if (new_hde) 2163 new_fmt = &new_hde->hpp; 2164 } else { 2165 new_fmt = memdup(fmt, sizeof(*fmt)); 2166 } 2167 2168 INIT_LIST_HEAD(&new_fmt->list); 2169 INIT_LIST_HEAD(&new_fmt->sort_list); 2170 2171 return new_fmt; 2172 } 2173 2174 static int parse_field_name(char *str, char **event, char **field, char **opt) 2175 { 2176 char *event_name, *field_name, *opt_name; 2177 2178 event_name = str; 2179 field_name = strchr(str, '.'); 2180 2181 if (field_name) { 2182 *field_name++ = '\0'; 2183 } else { 2184 event_name = NULL; 2185 field_name = str; 2186 } 2187 2188 opt_name = strchr(field_name, '/'); 2189 if (opt_name) 2190 *opt_name++ = '\0'; 2191 2192 *event = event_name; 2193 *field = field_name; 2194 *opt = opt_name; 2195 2196 return 0; 2197 } 2198 2199 /* find match evsel using a given event name. The event name can be: 2200 * 1. '%' + event index (e.g. '%1' for first event) 2201 * 2. full event name (e.g. sched:sched_switch) 2202 * 3. 
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* Find the matching evsel for a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. a full event name (e.g. sched:sched_switch)
 *   3. a partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

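/*
 * Add one or more columns for tracepoint fields named by a --sort token.
 * Accepted forms (examples for illustration):
 *   'trace_fields'                 - all fields of all tracepoint events
 *   'sched:sched_switch.*'         - all fields of one event
 *   'sched:sched_switch.next_pid'  - a single field of one event
 *   'next_pid'                     - the field in every event that has it
 * Appending '/raw' prints the raw field value instead of the pretty-printed
 * trace output.
 */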
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through the common exit path so 'str' isn't leaked */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

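/*
 * Resolve a --sort token by trying, in order: the common sort dimensions,
 * the hpp dimensions, the branch-stack dimensions, the memory-mode
 * dimensions and finally dynamic tracepoint fields.
 */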
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference between
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

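/*
 * Parse a comma/space separated sort key string.  Keys wrapped in braces
 * share one hierarchy level, e.g. (illustrative) '{comm,dso},sym' keeps
 * comm and dso on the same level and puts sym one level below.
 */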
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

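/*
 * A sort order starting with '+' appends the user's keys to the default
 * sort order for the current mode instead of replacing it, e.g.
 * (illustrative) '--sort +period' adds period after the default keys.
 */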
static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it
	 * because it is checked throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Add a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

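/*
 * A column is elided when the corresponding filter list contains exactly
 * one entry, since the column would then show the same value on every
 * line; the single value is printed once as a '# <name>: <value>' line
 * instead.
 */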
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			pr_err("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			pr_err("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

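/*
 * Main entry point for sort/output setup: parse the sort keys, optionally
 * add the 'parent' key, set up the output fields, then copy sort keys to
 * the output list and output fields back to the sort list so both stay in
 * sync, and finally set up any hists-specific (per-evsel) formats.
 */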
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}