1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <regex.h> 5 #include <sys/mman.h> 6 #include "sort.h" 7 #include "hist.h" 8 #include "comm.h" 9 #include "symbol.h" 10 #include "thread.h" 11 #include "evsel.h" 12 #include "evlist.h" 13 #include "strlist.h" 14 #include <traceevent/event-parse.h> 15 #include "mem-events.h" 16 #include <linux/kernel.h> 17 18 regex_t parent_regex; 19 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 20 const char *parent_pattern = default_parent_pattern; 21 const char *default_sort_order = "comm,dso,symbol"; 22 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 23 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 24 const char default_top_sort_order[] = "dso,symbol"; 25 const char default_diff_sort_order[] = "dso,symbol"; 26 const char default_tracepoint_sort_order[] = "trace"; 27 const char *sort_order; 28 const char *field_order; 29 regex_t ignore_callees_regex; 30 int have_ignore_callees = 0; 31 enum sort_mode sort__mode = SORT_MODE__NORMAL; 32 33 /* 34 * Replaces all occurrences of a char used with the: 35 * 36 * -t, --field-separator 37 * 38 * option, that uses a special separator character and don't pad with spaces, 39 * replacing all occurances of this separator in symbol names (and other 40 * output) with a '.' character, that thus it's the only non valid separator. 41 */ 42 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 
43 { 44 int n; 45 va_list ap; 46 47 va_start(ap, fmt); 48 n = vsnprintf(bf, size, fmt, ap); 49 if (symbol_conf.field_sep && n > 0) { 50 char *sep = bf; 51 52 while (1) { 53 sep = strchr(sep, *symbol_conf.field_sep); 54 if (sep == NULL) 55 break; 56 *sep = '.'; 57 } 58 } 59 va_end(ap); 60 61 if (n >= (int)size) 62 return size - 1; 63 return n; 64 } 65 66 static int64_t cmp_null(const void *l, const void *r) 67 { 68 if (!l && !r) 69 return 0; 70 else if (!l) 71 return -1; 72 else 73 return 1; 74 } 75 76 /* --sort pid */ 77 78 static int64_t 79 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 80 { 81 return right->thread->tid - left->thread->tid; 82 } 83 84 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 85 size_t size, unsigned int width) 86 { 87 const char *comm = thread__comm_str(he->thread); 88 89 width = max(7U, width) - 8; 90 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 91 width, width, comm ?: ""); 92 } 93 94 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 95 { 96 const struct thread *th = arg; 97 98 if (type != HIST_FILTER__THREAD) 99 return -1; 100 101 return th && he->thread != th; 102 } 103 104 struct sort_entry sort_thread = { 105 .se_header = " Pid:Command", 106 .se_cmp = sort__thread_cmp, 107 .se_snprintf = hist_entry__thread_snprintf, 108 .se_filter = hist_entry__thread_filter, 109 .se_width_idx = HISTC_THREAD, 110 }; 111 112 /* --sort comm */ 113 114 static int64_t 115 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 116 { 117 /* Compare the addr that should be unique among comm */ 118 return strcmp(comm__str(right->comm), comm__str(left->comm)); 119 } 120 121 static int64_t 122 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 123 { 124 /* Compare the addr that should be unique among comm */ 125 return strcmp(comm__str(right->comm), comm__str(left->comm)); 126 } 127 128 static int64_t 129 sort__comm_sort(struct 
hist_entry *left, struct hist_entry *right) 130 { 131 return strcmp(comm__str(right->comm), comm__str(left->comm)); 132 } 133 134 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 135 size_t size, unsigned int width) 136 { 137 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 138 } 139 140 struct sort_entry sort_comm = { 141 .se_header = "Command", 142 .se_cmp = sort__comm_cmp, 143 .se_collapse = sort__comm_collapse, 144 .se_sort = sort__comm_sort, 145 .se_snprintf = hist_entry__comm_snprintf, 146 .se_filter = hist_entry__thread_filter, 147 .se_width_idx = HISTC_COMM, 148 }; 149 150 /* --sort dso */ 151 152 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 153 { 154 struct dso *dso_l = map_l ? map_l->dso : NULL; 155 struct dso *dso_r = map_r ? map_r->dso : NULL; 156 const char *dso_name_l, *dso_name_r; 157 158 if (!dso_l || !dso_r) 159 return cmp_null(dso_r, dso_l); 160 161 if (verbose > 0) { 162 dso_name_l = dso_l->long_name; 163 dso_name_r = dso_r->long_name; 164 } else { 165 dso_name_l = dso_l->short_name; 166 dso_name_r = dso_r->short_name; 167 } 168 169 return strcmp(dso_name_l, dso_name_r); 170 } 171 172 static int64_t 173 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 174 { 175 return _sort__dso_cmp(right->ms.map, left->ms.map); 176 } 177 178 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 179 size_t size, unsigned int width) 180 { 181 if (map && map->dso) { 182 const char *dso_name = verbose > 0 ? 
map->dso->long_name : 183 map->dso->short_name; 184 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 185 } 186 187 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 188 } 189 190 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 191 size_t size, unsigned int width) 192 { 193 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 194 } 195 196 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 197 { 198 const struct dso *dso = arg; 199 200 if (type != HIST_FILTER__DSO) 201 return -1; 202 203 return dso && (!he->ms.map || he->ms.map->dso != dso); 204 } 205 206 struct sort_entry sort_dso = { 207 .se_header = "Shared Object", 208 .se_cmp = sort__dso_cmp, 209 .se_snprintf = hist_entry__dso_snprintf, 210 .se_filter = hist_entry__dso_filter, 211 .se_width_idx = HISTC_DSO, 212 }; 213 214 /* --sort symbol */ 215 216 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 217 { 218 return (int64_t)(right_ip - left_ip); 219 } 220 221 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 222 { 223 if (!sym_l || !sym_r) 224 return cmp_null(sym_l, sym_r); 225 226 if (sym_l == sym_r) 227 return 0; 228 229 if (sym_l->start != sym_r->start) 230 return (int64_t)(sym_r->start - sym_l->start); 231 232 return (int64_t)(sym_r->end - sym_l->end); 233 } 234 235 static int64_t 236 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 237 { 238 int64_t ret; 239 240 if (!left->ms.sym && !right->ms.sym) 241 return _sort__addr_cmp(left->ip, right->ip); 242 243 /* 244 * comparing symbol address alone is not enough since it's a 245 * relative address within a dso. 
246 */ 247 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 248 ret = sort__dso_cmp(left, right); 249 if (ret != 0) 250 return ret; 251 } 252 253 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 254 } 255 256 static int64_t 257 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 258 { 259 if (!left->ms.sym || !right->ms.sym) 260 return cmp_null(left->ms.sym, right->ms.sym); 261 262 return strcmp(right->ms.sym->name, left->ms.sym->name); 263 } 264 265 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 266 u64 ip, char level, char *bf, size_t size, 267 unsigned int width) 268 { 269 size_t ret = 0; 270 271 if (verbose > 0) { 272 char o = map ? dso__symtab_origin(map->dso) : '!'; 273 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 274 BITS_PER_LONG / 4 + 2, ip, o); 275 } 276 277 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 278 if (sym && map) { 279 if (map->type == MAP__VARIABLE) { 280 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 281 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 282 ip - map->unmap_ip(map, sym->start)); 283 } else { 284 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 285 width - ret, 286 sym->name); 287 } 288 } else { 289 size_t len = BITS_PER_LONG / 4; 290 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 291 len, ip); 292 } 293 294 return ret; 295 } 296 297 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 298 size_t size, unsigned int width) 299 { 300 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 301 he->level, bf, size, width); 302 } 303 304 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 305 { 306 const char *sym = arg; 307 308 if (type != HIST_FILTER__SYMBOL) 309 return -1; 310 311 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 312 } 313 314 struct sort_entry sort_sym = { 315 .se_header = "Symbol", 316 .se_cmp = sort__sym_cmp, 317 .se_sort = 
sort__sym_sort, 318 .se_snprintf = hist_entry__sym_snprintf, 319 .se_filter = hist_entry__sym_filter, 320 .se_width_idx = HISTC_SYMBOL, 321 }; 322 323 /* --sort srcline */ 324 325 char *hist_entry__get_srcline(struct hist_entry *he) 326 { 327 struct map *map = he->ms.map; 328 329 if (!map) 330 return SRCLINE_UNKNOWN; 331 332 return get_srcline(map->dso, map__rip_2objdump(map, he->ip), 333 he->ms.sym, true, true); 334 } 335 336 static int64_t 337 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 338 { 339 if (!left->srcline) 340 left->srcline = hist_entry__get_srcline(left); 341 if (!right->srcline) 342 right->srcline = hist_entry__get_srcline(right); 343 344 return strcmp(right->srcline, left->srcline); 345 } 346 347 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 348 size_t size, unsigned int width) 349 { 350 if (!he->srcline) 351 he->srcline = hist_entry__get_srcline(he); 352 353 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 354 } 355 356 struct sort_entry sort_srcline = { 357 .se_header = "Source:Line", 358 .se_cmp = sort__srcline_cmp, 359 .se_snprintf = hist_entry__srcline_snprintf, 360 .se_width_idx = HISTC_SRCLINE, 361 }; 362 363 /* --sort srcline_from */ 364 365 static int64_t 366 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 367 { 368 if (!left->branch_info->srcline_from) { 369 struct map *map = left->branch_info->from.map; 370 if (!map) 371 left->branch_info->srcline_from = SRCLINE_UNKNOWN; 372 else 373 left->branch_info->srcline_from = get_srcline(map->dso, 374 map__rip_2objdump(map, 375 left->branch_info->from.al_addr), 376 left->branch_info->from.sym, 377 true, true); 378 } 379 if (!right->branch_info->srcline_from) { 380 struct map *map = right->branch_info->from.map; 381 if (!map) 382 right->branch_info->srcline_from = SRCLINE_UNKNOWN; 383 else 384 right->branch_info->srcline_from = get_srcline(map->dso, 385 map__rip_2objdump(map, 386 
right->branch_info->from.al_addr), 387 right->branch_info->from.sym, 388 true, true); 389 } 390 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 391 } 392 393 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 394 size_t size, unsigned int width) 395 { 396 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 397 } 398 399 struct sort_entry sort_srcline_from = { 400 .se_header = "From Source:Line", 401 .se_cmp = sort__srcline_from_cmp, 402 .se_snprintf = hist_entry__srcline_from_snprintf, 403 .se_width_idx = HISTC_SRCLINE_FROM, 404 }; 405 406 /* --sort srcline_to */ 407 408 static int64_t 409 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 410 { 411 if (!left->branch_info->srcline_to) { 412 struct map *map = left->branch_info->to.map; 413 if (!map) 414 left->branch_info->srcline_to = SRCLINE_UNKNOWN; 415 else 416 left->branch_info->srcline_to = get_srcline(map->dso, 417 map__rip_2objdump(map, 418 left->branch_info->to.al_addr), 419 left->branch_info->from.sym, 420 true, true); 421 } 422 if (!right->branch_info->srcline_to) { 423 struct map *map = right->branch_info->to.map; 424 if (!map) 425 right->branch_info->srcline_to = SRCLINE_UNKNOWN; 426 else 427 right->branch_info->srcline_to = get_srcline(map->dso, 428 map__rip_2objdump(map, 429 right->branch_info->to.al_addr), 430 right->branch_info->to.sym, 431 true, true); 432 } 433 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 434 } 435 436 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 437 size_t size, unsigned int width) 438 { 439 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 440 } 441 442 struct sort_entry sort_srcline_to = { 443 .se_header = "To Source:Line", 444 .se_cmp = sort__srcline_to_cmp, 445 .se_snprintf = hist_entry__srcline_to_snprintf, 446 .se_width_idx = HISTC_SRCLINE_TO, 447 }; 448 449 
/* --sort srcfile */ 450 451 static char no_srcfile[1]; 452 453 static char *hist_entry__get_srcfile(struct hist_entry *e) 454 { 455 char *sf, *p; 456 struct map *map = e->ms.map; 457 458 if (!map) 459 return no_srcfile; 460 461 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 462 e->ms.sym, false, true, true); 463 if (!strcmp(sf, SRCLINE_UNKNOWN)) 464 return no_srcfile; 465 p = strchr(sf, ':'); 466 if (p && *sf) { 467 *p = 0; 468 return sf; 469 } 470 free(sf); 471 return no_srcfile; 472 } 473 474 static int64_t 475 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 476 { 477 if (!left->srcfile) 478 left->srcfile = hist_entry__get_srcfile(left); 479 if (!right->srcfile) 480 right->srcfile = hist_entry__get_srcfile(right); 481 482 return strcmp(right->srcfile, left->srcfile); 483 } 484 485 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 486 size_t size, unsigned int width) 487 { 488 if (!he->srcfile) 489 he->srcfile = hist_entry__get_srcfile(he); 490 491 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 492 } 493 494 struct sort_entry sort_srcfile = { 495 .se_header = "Source File", 496 .se_cmp = sort__srcfile_cmp, 497 .se_snprintf = hist_entry__srcfile_snprintf, 498 .se_width_idx = HISTC_SRCFILE, 499 }; 500 501 /* --sort parent */ 502 503 static int64_t 504 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 505 { 506 struct symbol *sym_l = left->parent; 507 struct symbol *sym_r = right->parent; 508 509 if (!sym_l || !sym_r) 510 return cmp_null(sym_l, sym_r); 511 512 return strcmp(sym_r->name, sym_l->name); 513 } 514 515 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 516 size_t size, unsigned int width) 517 { 518 return repsep_snprintf(bf, size, "%-*.*s", width, width, 519 he->parent ? 
he->parent->name : "[other]"); 520 } 521 522 struct sort_entry sort_parent = { 523 .se_header = "Parent symbol", 524 .se_cmp = sort__parent_cmp, 525 .se_snprintf = hist_entry__parent_snprintf, 526 .se_width_idx = HISTC_PARENT, 527 }; 528 529 /* --sort cpu */ 530 531 static int64_t 532 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 533 { 534 return right->cpu - left->cpu; 535 } 536 537 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 538 size_t size, unsigned int width) 539 { 540 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 541 } 542 543 struct sort_entry sort_cpu = { 544 .se_header = "CPU", 545 .se_cmp = sort__cpu_cmp, 546 .se_snprintf = hist_entry__cpu_snprintf, 547 .se_width_idx = HISTC_CPU, 548 }; 549 550 /* --sort cgroup_id */ 551 552 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev) 553 { 554 return (int64_t)(right_dev - left_dev); 555 } 556 557 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino) 558 { 559 return (int64_t)(right_ino - left_ino); 560 } 561 562 static int64_t 563 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right) 564 { 565 int64_t ret; 566 567 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev); 568 if (ret != 0) 569 return ret; 570 571 return _sort__cgroup_inode_cmp(right->cgroup_id.ino, 572 left->cgroup_id.ino); 573 } 574 575 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he, 576 char *bf, size_t size, 577 unsigned int width __maybe_unused) 578 { 579 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev, 580 he->cgroup_id.ino); 581 } 582 583 struct sort_entry sort_cgroup_id = { 584 .se_header = "cgroup id (dev/inode)", 585 .se_cmp = sort__cgroup_id_cmp, 586 .se_snprintf = hist_entry__cgroup_id_snprintf, 587 .se_width_idx = HISTC_CGROUP_ID, 588 }; 589 590 /* --sort socket */ 591 592 static int64_t 593 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 594 { 595 return 
right->socket - left->socket; 596 } 597 598 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 599 size_t size, unsigned int width) 600 { 601 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 602 } 603 604 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 605 { 606 int sk = *(const int *)arg; 607 608 if (type != HIST_FILTER__SOCKET) 609 return -1; 610 611 return sk >= 0 && he->socket != sk; 612 } 613 614 struct sort_entry sort_socket = { 615 .se_header = "Socket", 616 .se_cmp = sort__socket_cmp, 617 .se_snprintf = hist_entry__socket_snprintf, 618 .se_filter = hist_entry__socket_filter, 619 .se_width_idx = HISTC_SOCKET, 620 }; 621 622 /* --sort trace */ 623 624 static char *get_trace_output(struct hist_entry *he) 625 { 626 struct trace_seq seq; 627 struct perf_evsel *evsel; 628 struct pevent_record rec = { 629 .data = he->raw_data, 630 .size = he->raw_size, 631 }; 632 633 evsel = hists_to_evsel(he->hists); 634 635 trace_seq_init(&seq); 636 if (symbol_conf.raw_trace) { 637 pevent_print_fields(&seq, he->raw_data, he->raw_size, 638 evsel->tp_format); 639 } else { 640 pevent_event_info(&seq, evsel->tp_format, &rec); 641 } 642 /* 643 * Trim the buffer, it starts at 4KB and we're not going to 644 * add anything more to this buffer. 
645 */ 646 return realloc(seq.buffer, seq.len + 1); 647 } 648 649 static int64_t 650 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 651 { 652 struct perf_evsel *evsel; 653 654 evsel = hists_to_evsel(left->hists); 655 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 656 return 0; 657 658 if (left->trace_output == NULL) 659 left->trace_output = get_trace_output(left); 660 if (right->trace_output == NULL) 661 right->trace_output = get_trace_output(right); 662 663 return strcmp(right->trace_output, left->trace_output); 664 } 665 666 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 667 size_t size, unsigned int width) 668 { 669 struct perf_evsel *evsel; 670 671 evsel = hists_to_evsel(he->hists); 672 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 673 return scnprintf(bf, size, "%-.*s", width, "N/A"); 674 675 if (he->trace_output == NULL) 676 he->trace_output = get_trace_output(he); 677 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 678 } 679 680 struct sort_entry sort_trace = { 681 .se_header = "Trace output", 682 .se_cmp = sort__trace_cmp, 683 .se_snprintf = hist_entry__trace_snprintf, 684 .se_width_idx = HISTC_TRACE, 685 }; 686 687 /* sort keys for branch stacks */ 688 689 static int64_t 690 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 691 { 692 if (!left->branch_info || !right->branch_info) 693 return cmp_null(left->branch_info, right->branch_info); 694 695 return _sort__dso_cmp(left->branch_info->from.map, 696 right->branch_info->from.map); 697 } 698 699 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 700 size_t size, unsigned int width) 701 { 702 if (he->branch_info) 703 return _hist_entry__dso_snprintf(he->branch_info->from.map, 704 bf, size, width); 705 else 706 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 707 } 708 709 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 710 const void *arg) 711 { 712 const struct 
dso *dso = arg; 713 714 if (type != HIST_FILTER__DSO) 715 return -1; 716 717 return dso && (!he->branch_info || !he->branch_info->from.map || 718 he->branch_info->from.map->dso != dso); 719 } 720 721 static int64_t 722 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 723 { 724 if (!left->branch_info || !right->branch_info) 725 return cmp_null(left->branch_info, right->branch_info); 726 727 return _sort__dso_cmp(left->branch_info->to.map, 728 right->branch_info->to.map); 729 } 730 731 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 732 size_t size, unsigned int width) 733 { 734 if (he->branch_info) 735 return _hist_entry__dso_snprintf(he->branch_info->to.map, 736 bf, size, width); 737 else 738 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 739 } 740 741 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 742 const void *arg) 743 { 744 const struct dso *dso = arg; 745 746 if (type != HIST_FILTER__DSO) 747 return -1; 748 749 return dso && (!he->branch_info || !he->branch_info->to.map || 750 he->branch_info->to.map->dso != dso); 751 } 752 753 static int64_t 754 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 755 { 756 struct addr_map_symbol *from_l = &left->branch_info->from; 757 struct addr_map_symbol *from_r = &right->branch_info->from; 758 759 if (!left->branch_info || !right->branch_info) 760 return cmp_null(left->branch_info, right->branch_info); 761 762 from_l = &left->branch_info->from; 763 from_r = &right->branch_info->from; 764 765 if (!from_l->sym && !from_r->sym) 766 return _sort__addr_cmp(from_l->addr, from_r->addr); 767 768 return _sort__sym_cmp(from_l->sym, from_r->sym); 769 } 770 771 static int64_t 772 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 773 { 774 struct addr_map_symbol *to_l, *to_r; 775 776 if (!left->branch_info || !right->branch_info) 777 return cmp_null(left->branch_info, right->branch_info); 778 779 to_l = 
&left->branch_info->to; 780 to_r = &right->branch_info->to; 781 782 if (!to_l->sym && !to_r->sym) 783 return _sort__addr_cmp(to_l->addr, to_r->addr); 784 785 return _sort__sym_cmp(to_l->sym, to_r->sym); 786 } 787 788 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 789 size_t size, unsigned int width) 790 { 791 if (he->branch_info) { 792 struct addr_map_symbol *from = &he->branch_info->from; 793 794 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 795 he->level, bf, size, width); 796 } 797 798 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 799 } 800 801 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 802 size_t size, unsigned int width) 803 { 804 if (he->branch_info) { 805 struct addr_map_symbol *to = &he->branch_info->to; 806 807 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 808 he->level, bf, size, width); 809 } 810 811 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 812 } 813 814 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 815 const void *arg) 816 { 817 const char *sym = arg; 818 819 if (type != HIST_FILTER__SYMBOL) 820 return -1; 821 822 return sym && !(he->branch_info && he->branch_info->from.sym && 823 strstr(he->branch_info->from.sym->name, sym)); 824 } 825 826 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 827 const void *arg) 828 { 829 const char *sym = arg; 830 831 if (type != HIST_FILTER__SYMBOL) 832 return -1; 833 834 return sym && !(he->branch_info && he->branch_info->to.sym && 835 strstr(he->branch_info->to.sym->name, sym)); 836 } 837 838 struct sort_entry sort_dso_from = { 839 .se_header = "Source Shared Object", 840 .se_cmp = sort__dso_from_cmp, 841 .se_snprintf = hist_entry__dso_from_snprintf, 842 .se_filter = hist_entry__dso_from_filter, 843 .se_width_idx = HISTC_DSO_FROM, 844 }; 845 846 struct sort_entry sort_dso_to = { 847 .se_header = "Target Shared Object", 848 .se_cmp = 
sort__dso_to_cmp, 849 .se_snprintf = hist_entry__dso_to_snprintf, 850 .se_filter = hist_entry__dso_to_filter, 851 .se_width_idx = HISTC_DSO_TO, 852 }; 853 854 struct sort_entry sort_sym_from = { 855 .se_header = "Source Symbol", 856 .se_cmp = sort__sym_from_cmp, 857 .se_snprintf = hist_entry__sym_from_snprintf, 858 .se_filter = hist_entry__sym_from_filter, 859 .se_width_idx = HISTC_SYMBOL_FROM, 860 }; 861 862 struct sort_entry sort_sym_to = { 863 .se_header = "Target Symbol", 864 .se_cmp = sort__sym_to_cmp, 865 .se_snprintf = hist_entry__sym_to_snprintf, 866 .se_filter = hist_entry__sym_to_filter, 867 .se_width_idx = HISTC_SYMBOL_TO, 868 }; 869 870 static int64_t 871 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 872 { 873 unsigned char mp, p; 874 875 if (!left->branch_info || !right->branch_info) 876 return cmp_null(left->branch_info, right->branch_info); 877 878 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 879 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 880 return mp || p; 881 } 882 883 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 884 size_t size, unsigned int width){ 885 static const char *out = "N/A"; 886 887 if (he->branch_info) { 888 if (he->branch_info->flags.predicted) 889 out = "N"; 890 else if (he->branch_info->flags.mispred) 891 out = "Y"; 892 } 893 894 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 895 } 896 897 static int64_t 898 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 899 { 900 if (!left->branch_info || !right->branch_info) 901 return cmp_null(left->branch_info, right->branch_info); 902 903 return left->branch_info->flags.cycles - 904 right->branch_info->flags.cycles; 905 } 906 907 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 908 size_t size, unsigned int width) 909 { 910 if (!he->branch_info) 911 return scnprintf(bf, size, "%-.*s", width, "N/A"); 912 if 
(he->branch_info->flags.cycles == 0) 913 return repsep_snprintf(bf, size, "%-*s", width, "-"); 914 return repsep_snprintf(bf, size, "%-*hd", width, 915 he->branch_info->flags.cycles); 916 } 917 918 struct sort_entry sort_cycles = { 919 .se_header = "Basic Block Cycles", 920 .se_cmp = sort__cycles_cmp, 921 .se_snprintf = hist_entry__cycles_snprintf, 922 .se_width_idx = HISTC_CYCLES, 923 }; 924 925 /* --sort daddr_sym */ 926 int64_t 927 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 928 { 929 uint64_t l = 0, r = 0; 930 931 if (left->mem_info) 932 l = left->mem_info->daddr.addr; 933 if (right->mem_info) 934 r = right->mem_info->daddr.addr; 935 936 return (int64_t)(r - l); 937 } 938 939 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 940 size_t size, unsigned int width) 941 { 942 uint64_t addr = 0; 943 struct map *map = NULL; 944 struct symbol *sym = NULL; 945 946 if (he->mem_info) { 947 addr = he->mem_info->daddr.addr; 948 map = he->mem_info->daddr.map; 949 sym = he->mem_info->daddr.sym; 950 } 951 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 952 width); 953 } 954 955 int64_t 956 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 957 { 958 uint64_t l = 0, r = 0; 959 960 if (left->mem_info) 961 l = left->mem_info->iaddr.addr; 962 if (right->mem_info) 963 r = right->mem_info->iaddr.addr; 964 965 return (int64_t)(r - l); 966 } 967 968 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 969 size_t size, unsigned int width) 970 { 971 uint64_t addr = 0; 972 struct map *map = NULL; 973 struct symbol *sym = NULL; 974 975 if (he->mem_info) { 976 addr = he->mem_info->iaddr.addr; 977 map = he->mem_info->iaddr.map; 978 sym = he->mem_info->iaddr.sym; 979 } 980 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 981 width); 982 } 983 984 static int64_t 985 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 986 { 987 struct map *map_l = NULL; 
988 struct map *map_r = NULL; 989 990 if (left->mem_info) 991 map_l = left->mem_info->daddr.map; 992 if (right->mem_info) 993 map_r = right->mem_info->daddr.map; 994 995 return _sort__dso_cmp(map_l, map_r); 996 } 997 998 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 999 size_t size, unsigned int width) 1000 { 1001 struct map *map = NULL; 1002 1003 if (he->mem_info) 1004 map = he->mem_info->daddr.map; 1005 1006 return _hist_entry__dso_snprintf(map, bf, size, width); 1007 } 1008 1009 static int64_t 1010 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 1011 { 1012 union perf_mem_data_src data_src_l; 1013 union perf_mem_data_src data_src_r; 1014 1015 if (left->mem_info) 1016 data_src_l = left->mem_info->data_src; 1017 else 1018 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 1019 1020 if (right->mem_info) 1021 data_src_r = right->mem_info->data_src; 1022 else 1023 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 1024 1025 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 1026 } 1027 1028 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 1029 size_t size, unsigned int width) 1030 { 1031 char out[10]; 1032 1033 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 1034 return repsep_snprintf(bf, size, "%.*s", width, out); 1035 } 1036 1037 static int64_t 1038 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 1039 { 1040 union perf_mem_data_src data_src_l; 1041 union perf_mem_data_src data_src_r; 1042 1043 if (left->mem_info) 1044 data_src_l = left->mem_info->data_src; 1045 else 1046 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 1047 1048 if (right->mem_info) 1049 data_src_r = right->mem_info->data_src; 1050 else 1051 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 1052 1053 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 1054 } 1055 1056 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1057 size_t size, unsigned int width) 1058 { 1059 char out[64]; 1060 1061 
perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info); 1062 return repsep_snprintf(bf, size, "%-*s", width, out); 1063 } 1064 1065 static int64_t 1066 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1067 { 1068 union perf_mem_data_src data_src_l; 1069 union perf_mem_data_src data_src_r; 1070 1071 if (left->mem_info) 1072 data_src_l = left->mem_info->data_src; 1073 else 1074 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1075 1076 if (right->mem_info) 1077 data_src_r = right->mem_info->data_src; 1078 else 1079 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1080 1081 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1082 } 1083 1084 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1085 size_t size, unsigned int width) 1086 { 1087 char out[64]; 1088 1089 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1090 return repsep_snprintf(bf, size, "%-*s", width, out); 1091 } 1092 1093 static int64_t 1094 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1095 { 1096 union perf_mem_data_src data_src_l; 1097 union perf_mem_data_src data_src_r; 1098 1099 if (left->mem_info) 1100 data_src_l = left->mem_info->data_src; 1101 else 1102 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1103 1104 if (right->mem_info) 1105 data_src_r = right->mem_info->data_src; 1106 else 1107 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1108 1109 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1110 } 1111 1112 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1113 size_t size, unsigned int width) 1114 { 1115 char out[64]; 1116 1117 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1118 return repsep_snprintf(bf, size, "%-*s", width, out); 1119 } 1120 1121 int64_t 1122 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1123 { 1124 u64 l, r; 1125 struct map *l_map, *r_map; 1126 1127 if (!left->mem_info) return -1; 1128 if (!right->mem_info) return 1; 1129 1130 /* group event types together */ 1131 
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	/* group by backing file: device, inode, and inode generation */
	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

/*
 * Print the data cacheline column.  The level letter is overridden
 * with 's' for shared file-backed mmaps and 'X' when the data address
 * could not be resolved to a map at all.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

/* Average sample weight of an entry; 0 when no events were aggregated. */
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ?
		he->stat.weight / he->stat.nr_events : 0;
}

/* --sort local_weight: compare per-event average sample weights. */
static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

/* --sort weight: compare total accumulated sample weights. */
static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

/* Memory-mode sort entries; cmp/snprintf helpers are defined above. */
struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
.se_cmp = sort__locked_cmp, 1287 .se_snprintf = hist_entry__locked_snprintf, 1288 .se_width_idx = HISTC_MEM_LOCKED, 1289 }; 1290 1291 struct sort_entry sort_mem_tlb = { 1292 .se_header = "TLB access", 1293 .se_cmp = sort__tlb_cmp, 1294 .se_snprintf = hist_entry__tlb_snprintf, 1295 .se_width_idx = HISTC_MEM_TLB, 1296 }; 1297 1298 struct sort_entry sort_mem_lvl = { 1299 .se_header = "Memory access", 1300 .se_cmp = sort__lvl_cmp, 1301 .se_snprintf = hist_entry__lvl_snprintf, 1302 .se_width_idx = HISTC_MEM_LVL, 1303 }; 1304 1305 struct sort_entry sort_mem_snoop = { 1306 .se_header = "Snoop", 1307 .se_cmp = sort__snoop_cmp, 1308 .se_snprintf = hist_entry__snoop_snprintf, 1309 .se_width_idx = HISTC_MEM_SNOOP, 1310 }; 1311 1312 struct sort_entry sort_mem_dcacheline = { 1313 .se_header = "Data Cacheline", 1314 .se_cmp = sort__dcacheline_cmp, 1315 .se_snprintf = hist_entry__dcacheline_snprintf, 1316 .se_width_idx = HISTC_MEM_DCACHELINE, 1317 }; 1318 1319 static int64_t 1320 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 1321 { 1322 uint64_t l = 0, r = 0; 1323 1324 if (left->mem_info) 1325 l = left->mem_info->daddr.phys_addr; 1326 if (right->mem_info) 1327 r = right->mem_info->daddr.phys_addr; 1328 1329 return (int64_t)(r - l); 1330 } 1331 1332 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf, 1333 size_t size, unsigned int width) 1334 { 1335 uint64_t addr = 0; 1336 size_t ret = 0; 1337 size_t len = BITS_PER_LONG / 4; 1338 1339 addr = he->mem_info->daddr.phys_addr; 1340 1341 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level); 1342 1343 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr); 1344 1345 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, ""); 1346 1347 if (ret > width) 1348 bf[width] = '\0'; 1349 1350 return width; 1351 } 1352 1353 struct sort_entry sort_mem_phys_daddr = { 1354 .se_header = "Data Physical Address", 1355 .se_cmp = sort__phys_daddr_cmp, 1356 .se_snprintf = 
hist_entry__phys_daddr_snprintf, 1357 .se_width_idx = HISTC_MEM_PHYS_DADDR, 1358 }; 1359 1360 static int64_t 1361 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right) 1362 { 1363 if (!left->branch_info || !right->branch_info) 1364 return cmp_null(left->branch_info, right->branch_info); 1365 1366 return left->branch_info->flags.abort != 1367 right->branch_info->flags.abort; 1368 } 1369 1370 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf, 1371 size_t size, unsigned int width) 1372 { 1373 static const char *out = "N/A"; 1374 1375 if (he->branch_info) { 1376 if (he->branch_info->flags.abort) 1377 out = "A"; 1378 else 1379 out = "."; 1380 } 1381 1382 return repsep_snprintf(bf, size, "%-*s", width, out); 1383 } 1384 1385 struct sort_entry sort_abort = { 1386 .se_header = "Transaction abort", 1387 .se_cmp = sort__abort_cmp, 1388 .se_snprintf = hist_entry__abort_snprintf, 1389 .se_width_idx = HISTC_ABORT, 1390 }; 1391 1392 static int64_t 1393 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right) 1394 { 1395 if (!left->branch_info || !right->branch_info) 1396 return cmp_null(left->branch_info, right->branch_info); 1397 1398 return left->branch_info->flags.in_tx != 1399 right->branch_info->flags.in_tx; 1400 } 1401 1402 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf, 1403 size_t size, unsigned int width) 1404 { 1405 static const char *out = "N/A"; 1406 1407 if (he->branch_info) { 1408 if (he->branch_info->flags.in_tx) 1409 out = "T"; 1410 else 1411 out = "."; 1412 } 1413 1414 return repsep_snprintf(bf, size, "%-*s", width, out); 1415 } 1416 1417 struct sort_entry sort_in_tx = { 1418 .se_header = "Branch in transaction", 1419 .se_cmp = sort__in_tx_cmp, 1420 .se_snprintf = hist_entry__in_tx_snprintf, 1421 .se_width_idx = HISTC_IN_TX, 1422 }; 1423 1424 static int64_t 1425 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right) 1426 { 1427 return left->transaction - right->transaction; 1428 
}

/* Append 'str' at 'p'; returns the new end-of-string position. */
static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

/* Transaction flag bits and their display names. */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;	/* not counted towards the column width */
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

/* Worst-case display width of the transaction column. */
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

/* Decode the transaction word into flag names plus abort code. */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	/* non-zero word that names neither a sync nor an async abort */
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

/* --sort symbol_size */

/* Compare symbol sizes; a missing symbol counts as size 0. */
static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ?
	    symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};


/* Maps a --sort token to its sort_entry implementation. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;	/* already added to the hpp list */
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

/* branch-stack dimensions index relative to __SORT_BRANCH_STACK */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

/* memory-mode dimensions index relative to __SORT_MEMORY_MODE */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

/* Maps a token to a perf_hpp output-only format (overhead, period, ...). */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

/* A sort_entry wrapped as a perf_hpp output format. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

/* Grow the column width of 'fmt' so at least its header fits. */
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

/* Print the column header; user-forced width wins over computed width. */
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

/* Print one entry's cell by delegating to the sort_entry's snprintf. */
static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry,
			   hpp);
	return hse->se->se_cmp(a, b);
}

/* Collapse stage comparator; falls back to se_cmp when unset. */
static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

/* Output-sort stage comparator; falls back to se_cmp when unset. */
static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

/* A format is a sort entry iff it uses our header callback. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

/* Generate perf_hpp__is_<key>_entry() predicates for specific keys. */
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


/* Two hpp formats are equal when they wrap the same sort_entry. */
static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

/* Wrap 'sd' in a newly allocated hpp format; returns NULL on OOM. */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

/* Duplicate a built-in hpp format for a given nesting level. */
static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

/*
 * Run every applicable se_filter over 'he'.  Returns -1 when no
 * filter of 'type' applied, otherwise non-zero iff the entry is
 * filtered out by at least one key.
 */
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied. But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

/* Allocate and register 'sd' as a sort key on 'list'. */
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

/* Allocate and register 'sd' as an output column on 'list'. */
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

/* Dynamic sort entry: one tracepoint field of one event. */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;	/* widest value seen, for the column */
	bool raw_trace;		/* print the raw field, not pretty text */
};

/* Column width: max of widest value, field name, and field size. */
static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

/* Track the widest pretty-printed value of the field for this entry. */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	/* scan space-separated "name=value" tokens for our field */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

/* Header for a dynamic column: the tracepoint field name. */
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

/* True when this dynamic column belongs to the given hists' evsel. */
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

/* Print the field value, pretty by default or raw when requested. */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len =
		      hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	/* find "name=value" in the pretty output; keep only the value */
	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;	/* freed below, like the strndup */
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

/*
 * Compare the raw bytes of the field.  A NULL 'b' is the special
 * "update width only" call made while resorting.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		/* dynamic fields store offset in the low, size in the
		 * high 16 bits of the recorded value */
		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

/* A format is a dynamic entry iff it uses our comparator. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool
__sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 2081 { 2082 struct hpp_dynamic_entry *hde_a; 2083 struct hpp_dynamic_entry *hde_b; 2084 2085 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 2086 return false; 2087 2088 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 2089 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 2090 2091 return hde_a->field == hde_b->field; 2092 } 2093 2094 static void hde_free(struct perf_hpp_fmt *fmt) 2095 { 2096 struct hpp_dynamic_entry *hde; 2097 2098 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2099 free(hde); 2100 } 2101 2102 static struct hpp_dynamic_entry * 2103 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field, 2104 int level) 2105 { 2106 struct hpp_dynamic_entry *hde; 2107 2108 hde = malloc(sizeof(*hde)); 2109 if (hde == NULL) { 2110 pr_debug("Memory allocation failed\n"); 2111 return NULL; 2112 } 2113 2114 hde->evsel = evsel; 2115 hde->field = field; 2116 hde->dynamic_len = 0; 2117 2118 hde->hpp.name = field->name; 2119 hde->hpp.header = __sort__hde_header; 2120 hde->hpp.width = __sort__hde_width; 2121 hde->hpp.entry = __sort__hde_entry; 2122 hde->hpp.color = NULL; 2123 2124 hde->hpp.cmp = __sort__hde_cmp; 2125 hde->hpp.collapse = __sort__hde_cmp; 2126 hde->hpp.sort = __sort__hde_cmp; 2127 hde->hpp.equal = __sort__hde_equal; 2128 hde->hpp.free = hde_free; 2129 2130 INIT_LIST_HEAD(&hde->hpp.list); 2131 INIT_LIST_HEAD(&hde->hpp.sort_list); 2132 hde->hpp.elide = false; 2133 hde->hpp.len = 0; 2134 hde->hpp.user_len = 0; 2135 hde->hpp.level = level; 2136 2137 return hde; 2138 } 2139 2140 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 2141 { 2142 struct perf_hpp_fmt *new_fmt = NULL; 2143 2144 if (perf_hpp__is_sort_entry(fmt)) { 2145 struct hpp_sort_entry *hse, *new_hse; 2146 2147 hse = container_of(fmt, struct hpp_sort_entry, hpp); 2148 new_hse = memdup(hse, sizeof(*hse)); 2149 if (new_hse) 2150 new_fmt = &new_hse->hpp; 2151 } 
else if (perf_hpp__is_dynamic_entry(fmt)) { 2152 struct hpp_dynamic_entry *hde, *new_hde; 2153 2154 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2155 new_hde = memdup(hde, sizeof(*hde)); 2156 if (new_hde) 2157 new_fmt = &new_hde->hpp; 2158 } else { 2159 new_fmt = memdup(fmt, sizeof(*fmt)); 2160 } 2161 2162 INIT_LIST_HEAD(&new_fmt->list); 2163 INIT_LIST_HEAD(&new_fmt->sort_list); 2164 2165 return new_fmt; 2166 } 2167 2168 static int parse_field_name(char *str, char **event, char **field, char **opt) 2169 { 2170 char *event_name, *field_name, *opt_name; 2171 2172 event_name = str; 2173 field_name = strchr(str, '.'); 2174 2175 if (field_name) { 2176 *field_name++ = '\0'; 2177 } else { 2178 event_name = NULL; 2179 field_name = str; 2180 } 2181 2182 opt_name = strchr(field_name, '/'); 2183 if (opt_name) 2184 *opt_name++ = '\0'; 2185 2186 *event = event_name; 2187 *field = field_name; 2188 *opt = opt_name; 2189 2190 return 0; 2191 } 2192 2193 /* find match evsel using a given event name. The event name can be: 2194 * 1. '%' + event index (e.g. '%1' for first event) 2195 * 2. full event name (e.g. sched:sched_switch) 2196 * 3. 
partial event name (should not contain ':') 2197 */ 2198 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name) 2199 { 2200 struct perf_evsel *evsel = NULL; 2201 struct perf_evsel *pos; 2202 bool full_name; 2203 2204 /* case 1 */ 2205 if (event_name[0] == '%') { 2206 int nr = strtol(event_name+1, NULL, 0); 2207 2208 if (nr > evlist->nr_entries) 2209 return NULL; 2210 2211 evsel = perf_evlist__first(evlist); 2212 while (--nr > 0) 2213 evsel = perf_evsel__next(evsel); 2214 2215 return evsel; 2216 } 2217 2218 full_name = !!strchr(event_name, ':'); 2219 evlist__for_each_entry(evlist, pos) { 2220 /* case 2 */ 2221 if (full_name && !strcmp(pos->name, event_name)) 2222 return pos; 2223 /* case 3 */ 2224 if (!full_name && strstr(pos->name, event_name)) { 2225 if (evsel) { 2226 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2227 event_name, evsel->name, pos->name); 2228 return NULL; 2229 } 2230 evsel = pos; 2231 } 2232 } 2233 2234 return evsel; 2235 } 2236 2237 static int __dynamic_dimension__add(struct perf_evsel *evsel, 2238 struct format_field *field, 2239 bool raw_trace, int level) 2240 { 2241 struct hpp_dynamic_entry *hde; 2242 2243 hde = __alloc_dynamic_entry(evsel, field, level); 2244 if (hde == NULL) 2245 return -ENOMEM; 2246 2247 hde->raw_trace = raw_trace; 2248 2249 perf_hpp__register_sort_field(&hde->hpp); 2250 return 0; 2251 } 2252 2253 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level) 2254 { 2255 int ret; 2256 struct format_field *field; 2257 2258 field = evsel->tp_format->format.fields; 2259 while (field) { 2260 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2261 if (ret < 0) 2262 return ret; 2263 2264 field = field->next; 2265 } 2266 return 0; 2267 } 2268 2269 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace, 2270 int level) 2271 { 2272 int ret; 2273 struct perf_evsel *evsel; 2274 2275 evlist__for_each_entry(evlist, evsel) { 2276 if 
(evsel->attr.type != PERF_TYPE_TRACEPOINT) 2277 continue; 2278 2279 ret = add_evsel_fields(evsel, raw_trace, level); 2280 if (ret < 0) 2281 return ret; 2282 } 2283 return 0; 2284 } 2285 2286 static int add_all_matching_fields(struct perf_evlist *evlist, 2287 char *field_name, bool raw_trace, int level) 2288 { 2289 int ret = -ESRCH; 2290 struct perf_evsel *evsel; 2291 struct format_field *field; 2292 2293 evlist__for_each_entry(evlist, evsel) { 2294 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 2295 continue; 2296 2297 field = pevent_find_any_field(evsel->tp_format, field_name); 2298 if (field == NULL) 2299 continue; 2300 2301 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2302 if (ret < 0) 2303 break; 2304 } 2305 return ret; 2306 } 2307 2308 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok, 2309 int level) 2310 { 2311 char *str, *event_name, *field_name, *opt_name; 2312 struct perf_evsel *evsel; 2313 struct format_field *field; 2314 bool raw_trace = symbol_conf.raw_trace; 2315 int ret = 0; 2316 2317 if (evlist == NULL) 2318 return -ENOENT; 2319 2320 str = strdup(tok); 2321 if (str == NULL) 2322 return -ENOMEM; 2323 2324 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 2325 ret = -EINVAL; 2326 goto out; 2327 } 2328 2329 if (opt_name) { 2330 if (strcmp(opt_name, "raw")) { 2331 pr_debug("unsupported field option %s\n", opt_name); 2332 ret = -EINVAL; 2333 goto out; 2334 } 2335 raw_trace = true; 2336 } 2337 2338 if (!strcmp(field_name, "trace_fields")) { 2339 ret = add_all_dynamic_fields(evlist, raw_trace, level); 2340 goto out; 2341 } 2342 2343 if (event_name == NULL) { 2344 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2345 goto out; 2346 } 2347 2348 evsel = find_evsel(evlist, event_name); 2349 if (evsel == NULL) { 2350 pr_debug("Cannot find event: %s\n", event_name); 2351 ret = -ENOENT; 2352 goto out; 2353 } 2354 2355 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) { 2356 pr_debug("%s 
is not a tracepoint event\n", event_name); 2357 ret = -EINVAL; 2358 goto out; 2359 } 2360 2361 if (!strcmp(field_name, "*")) { 2362 ret = add_evsel_fields(evsel, raw_trace, level); 2363 } else { 2364 field = pevent_find_any_field(evsel->tp_format, field_name); 2365 if (field == NULL) { 2366 pr_debug("Cannot find event field for %s.%s\n", 2367 event_name, field_name); 2368 return -ENOENT; 2369 } 2370 2371 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2372 } 2373 2374 out: 2375 free(str); 2376 return ret; 2377 } 2378 2379 static int __sort_dimension__add(struct sort_dimension *sd, 2380 struct perf_hpp_list *list, 2381 int level) 2382 { 2383 if (sd->taken) 2384 return 0; 2385 2386 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2387 return -1; 2388 2389 if (sd->entry->se_collapse) 2390 list->need_collapse = 1; 2391 2392 sd->taken = 1; 2393 2394 return 0; 2395 } 2396 2397 static int __hpp_dimension__add(struct hpp_dimension *hd, 2398 struct perf_hpp_list *list, 2399 int level) 2400 { 2401 struct perf_hpp_fmt *fmt; 2402 2403 if (hd->taken) 2404 return 0; 2405 2406 fmt = __hpp_dimension__alloc_hpp(hd, level); 2407 if (!fmt) 2408 return -1; 2409 2410 hd->taken = 1; 2411 perf_hpp_list__register_sort_field(list, fmt); 2412 return 0; 2413 } 2414 2415 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2416 struct sort_dimension *sd) 2417 { 2418 if (sd->taken) 2419 return 0; 2420 2421 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2422 return -1; 2423 2424 sd->taken = 1; 2425 return 0; 2426 } 2427 2428 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2429 struct hpp_dimension *hd) 2430 { 2431 struct perf_hpp_fmt *fmt; 2432 2433 if (hd->taken) 2434 return 0; 2435 2436 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2437 if (!fmt) 2438 return -1; 2439 2440 hd->taken = 1; 2441 perf_hpp_list__column_register(list, fmt); 2442 return 0; 2443 } 2444 2445 int hpp_dimension__add_output(unsigned col) 2446 { 2447 BUG_ON(col >= 
PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

/*
 * Resolve the sort key @tok and register it on @list at nesting @level.
 *
 * The dimension tables are searched in order: common keys, hpp
 * (overhead-style) keys, branch-stack keys, memory keys, and finally
 * dynamic tracepoint fields.  Matching is case-insensitive by prefix
 * (strncasecmp over strlen(tok)), so the first dimension whose name
 * starts with @tok wins.
 *
 * Returns 0 on success, -EINVAL for a key that is invalid in the
 * current sort mode (or a bad parent regex), -ESRCH for an unknown key.
 */
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			/* the parent key needs its matching regex compiled */
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* branch-stack keys are only valid in branch mode */
		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		/* dcacheline sorting is meaningless without the line size */
		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	/* last resort: try a dynamic "event.field" tracepoint key */
	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

/*
 * Split the --sort string @str on "{}, " (in place) and add each token
 * as a sort key.  Keys inside a {} group share one hierarchy level;
 * every other separator starts a new level.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp =
strpbrk(str, "{}, "); 2559 if (tmp) { 2560 if (in_group) 2561 next_level = level; 2562 else 2563 next_level = level + 1; 2564 2565 if (*tmp == '{') 2566 in_group = true; 2567 else if (*tmp == '}') 2568 in_group = false; 2569 2570 *tmp = '\0'; 2571 str = tmp + 1; 2572 } 2573 2574 if (*tok) { 2575 ret = sort_dimension__add(list, tok, evlist, level); 2576 if (ret == -EINVAL) { 2577 if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok))) 2578 pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system"); 2579 else 2580 pr_err("Invalid --sort key: `%s'", tok); 2581 break; 2582 } else if (ret == -ESRCH) { 2583 pr_err("Unknown --sort key: `%s'", tok); 2584 break; 2585 } 2586 } 2587 2588 level = next_level; 2589 } while (tmp); 2590 2591 return ret; 2592 } 2593 2594 static const char *get_default_sort_order(struct perf_evlist *evlist) 2595 { 2596 const char *default_sort_orders[] = { 2597 default_sort_order, 2598 default_branch_sort_order, 2599 default_mem_sort_order, 2600 default_top_sort_order, 2601 default_diff_sort_order, 2602 default_tracepoint_sort_order, 2603 }; 2604 bool use_trace = true; 2605 struct perf_evsel *evsel; 2606 2607 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders)); 2608 2609 if (evlist == NULL || perf_evlist__empty(evlist)) 2610 goto out_no_evlist; 2611 2612 evlist__for_each_entry(evlist, evsel) { 2613 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) { 2614 use_trace = false; 2615 break; 2616 } 2617 } 2618 2619 if (use_trace) { 2620 sort__mode = SORT_MODE__TRACEPOINT; 2621 if (symbol_conf.raw_trace) 2622 return "trace_fields"; 2623 } 2624 out_no_evlist: 2625 return default_sort_orders[sort__mode]; 2626 } 2627 2628 static int setup_sort_order(struct perf_evlist *evlist) 2629 { 2630 char *new_sort_order; 2631 2632 /* 2633 * Append '+'-prefixed sort order to the default sort 2634 * order string. 
2635 */ 2636 if (!sort_order || is_strict_order(sort_order)) 2637 return 0; 2638 2639 if (sort_order[1] == '\0') { 2640 pr_err("Invalid --sort key: `+'"); 2641 return -EINVAL; 2642 } 2643 2644 /* 2645 * We allocate new sort_order string, but we never free it, 2646 * because it's checked over the rest of the code. 2647 */ 2648 if (asprintf(&new_sort_order, "%s,%s", 2649 get_default_sort_order(evlist), sort_order + 1) < 0) { 2650 pr_err("Not enough memory to set up --sort"); 2651 return -ENOMEM; 2652 } 2653 2654 sort_order = new_sort_order; 2655 return 0; 2656 } 2657 2658 /* 2659 * Adds 'pre,' prefix into 'str' is 'pre' is 2660 * not already part of 'str'. 2661 */ 2662 static char *prefix_if_not_in(const char *pre, char *str) 2663 { 2664 char *n; 2665 2666 if (!str || strstr(str, pre)) 2667 return str; 2668 2669 if (asprintf(&n, "%s,%s", pre, str) < 0) 2670 return NULL; 2671 2672 free(str); 2673 return n; 2674 } 2675 2676 static char *setup_overhead(char *keys) 2677 { 2678 if (sort__mode == SORT_MODE__DIFF) 2679 return keys; 2680 2681 keys = prefix_if_not_in("overhead", keys); 2682 2683 if (symbol_conf.cumulate_callchain) 2684 keys = prefix_if_not_in("overhead_children", keys); 2685 2686 return keys; 2687 } 2688 2689 static int __setup_sorting(struct perf_evlist *evlist) 2690 { 2691 char *str; 2692 const char *sort_keys; 2693 int ret = 0; 2694 2695 ret = setup_sort_order(evlist); 2696 if (ret) 2697 return ret; 2698 2699 sort_keys = sort_order; 2700 if (sort_keys == NULL) { 2701 if (is_strict_order(field_order)) { 2702 /* 2703 * If user specified field order but no sort order, 2704 * we'll honor it and not add default sort orders. 2705 */ 2706 return 0; 2707 } 2708 2709 sort_keys = get_default_sort_order(evlist); 2710 } 2711 2712 str = strdup(sort_keys); 2713 if (str == NULL) { 2714 pr_err("Not enough memory to setup sort keys"); 2715 return -ENOMEM; 2716 } 2717 2718 /* 2719 * Prepend overhead fields for backward compatibility. 
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

/* Set the elide flag of the sort entry whose column index is @idx. */
void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

/*
 * A column can be elided when its filter list pins it to a single
 * value: print that value once as a "# name: value" header line
 * (when @fp is given) instead of repeating it in every row.
 */
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

/* Decide elision for column @idx based on the active filter lists. */
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	/* the from/to column variants only exist in branch mode */
	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

/*
 * Compute the elide flag of every registered sort entry, emitting the
 * filtered single values to @output as header comments.
 */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

/*
 * Resolve the --fields key @tok against all dimension tables (same
 * case-insensitive prefix match as sort_dimension__add) and register
 * it as an output column.  Returns -ESRCH for an unknown key.
 */
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

/* Split the --fields string on ", " and add each token as a column. */
static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			pr_err("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			pr_err("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

/* Clear the ->taken flags so the dimension tables can be reused. */
void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

/*
 * A "strict" order replaces the default order; a '+'-prefixed order
 * is appended to the default instead.
 */
bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

/* Parse --fields and register the requested output columns. */
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	/* skip the leading '+' of an append-style field order */
	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

/*
 * Main entry point: set up sort keys and output fields for @evlist
 * from --sort/--fields (or the current mode's defaults).
 */
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	/* an explicit --parent pattern implies the parent sort key */
	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

/* Undo setup_sorting(): clear flags, orders and registered formats. */
void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}