#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include <linux/kernel.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the char used with the:
 *
 *	-t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it ends up being the only
 * invalid separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}
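/*
 * Illustrative example: with -t ',' (--field-separator), a symbol name such
 * as "foo,bar" is emitted by repsep_snprintf() as "foo.bar", so the chosen
 * separator can never appear inside a field value.
 */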
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};
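/*
 * Note (summary of the sort_entry callbacks as used in this file): se_cmp()
 * compares two hist entries on insertion, se_collapse() and se_sort(), when
 * set, override it for the collapse and final-resort passes, se_snprintf()
 * formats the column, and se_filter() returns -1 for filter types it does
 * not handle and non-zero when the entry should be filtered out.
 */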
/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
						     map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true, true);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

/* --sort srcline_from */

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->from.al_addr),
					left->branch_info->from.sym,
					true, true);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->from.al_addr),
					right->branch_info->from.sym,
					true, true);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->to.al_addr),
					left->branch_info->to.sym,
					true, true);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->to.al_addr),
					right->branch_info->to.sym,
					true, true);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};
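/*
 * Note: the srcline, srcline_from and srcline_to keys resolve source
 * locations lazily via get_srcline() and cache the resulting string on the
 * hist_entry / branch_info, so the relatively expensive lookup runs at most
 * once per entry.
 */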
/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

/* --sort trace */
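/*
 * Note: get_trace_output() formats a tracepoint sample either as
 * libtraceevent's pretty-printed event text or, with --raw-trace, as a
 * plain dump of the raw fields, then shrinks the trace_seq buffer to the
 * length actually used.
 */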
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l = &left->branch_info->from;
	struct addr_map_symbol *from_r = &right->branch_info->from;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};
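/*
 * Note: the branch flag keys below fall back to cmp_null() and print "N/A"
 * when no branch_info is attached to the entry.  The mispredict column
 * prints "N" for a predicted branch and "Y" for a mispredicted one.
 */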
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}
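/*
 * Note: the memory sort keys treat a missing mem_info as address 0 with no
 * map, while the data_src based keys further down (locked, tlb, mem, snoop)
 * substitute the corresponding PERF_MEM_*_NA value, so entries without
 * memory info still sort and print consistently.
 */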
static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}
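/*
 * Note (assumption about cl_address(), which is defined elsewhere): it is
 * expected to mask off the cacheline-offset bits of the resolved address,
 * so samples touching the same data cacheline collapse into one entry;
 * anonymous private userspace mappings are additionally distinguished by
 * pid, as the comment inside the comparator below explains.
 */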
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};
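/*
 * Note: "local_weight" shows the average sample weight of an entry (total
 * weight divided by the number of aggregated events, see he_weight()
 * below), while "weight" shows the summed weight across all samples.
 */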
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};
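/*
 * Note: the abort and in_tx columns below print a single letter ("A" for an
 * aborted transaction, "T" for a branch inside a transaction, "." otherwise)
 * and "N/A" when no branch_info is available.
 */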
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}
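/*
 * Illustrative example: a transaction word with the TX, SYNC and CAP-WRITE
 * bits set and abort code 5 would render as "TX SYNC CAP-WRITE :5";
 * "NEITHER " is appended when a non-zero transaction word has neither the
 * SYNC nor the ASYNC bit set.
 */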
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list applies, but non-matching filter types must be
		 * skipped.
		 */
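		/*
		 * Note: the net result is -1 when no sort key handles this
		 * filter type, 0 when at least one key handled it and kept
		 * the entry, and a positive value when any key filtered the
		 * entry out.
		 */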
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}
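/*
 * Note: __sort__hde_entry() extracts a single field's value from the
 * pretty-printed trace output by scanning for "<name> <value>" tokens
 * separated by spaces; if the field is not found there (or --raw-trace is
 * in effect) it falls back to printing the raw field via libtraceevent.
 */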
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

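/*
 * Split a --sort token of the form "[<event>.]<field>[/<opt>]" in place.
 * e.g. "sched:sched_switch.next_pid/raw" yields
 *   event = "sched:sched_switch", field = "next_pid", opt = "raw"
 * while a plain "next_pid" leaves event == NULL so that the field is
 * looked up in every tracepoint event.
 */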
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* Find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name+1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

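/*
 * Add sort/output columns for tracepoint fields.  The token may be
 * "trace_fields" (all fields of all tracepoint events), "<field>"
 * (that field in every event that has it), "<event>.<field>" or
 * "<event>.*", optionally suffixed with "/raw" to bypass the pretty
 * printer.
 */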
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* go through 'out' so that 'str' is freed */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

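/*
 * Resolve a single --sort token.  Tokens are matched case-insensitively
 * by prefix against the common, hpp, branch-stack and memory dimension
 * tables, in that order; anything left over is tried as a dynamic
 * tracepoint field.
 */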
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference
			 * between two or more perf.data files.  Those files
			 * could come from different binaries, so we should
			 * not compare their ips, but the symbol names.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

/*
 * Parse the sort key string.  Keys are separated by ',' or ' '; keys
 * wrapped in '{}' are placed at the same hierarchy level, while each
 * following key (or group) descends one level deeper.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

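/*
 * Pick the default sort order for the current sort mode.  If every event
 * in the session is a tracepoint, switch to SORT_MODE__TRACEPOINT first;
 * with --raw-trace that selects the "trace_fields" sort key.
 */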
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it is used throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Add the 'pre,' prefix to 'str' if 'pre' is not already part of it.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

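/*
 * Build the sort key list: a '+'-prefixed --sort order is appended to the
 * mode's default order, and when no sort order is given the default is
 * used (unless a strict --fields order replaces it entirely).  Unless
 * --fields is strict, the overhead keys are also prepended, e.g. "dso,sym"
 * becomes "overhead,dso,sym" (or "overhead_children,overhead,dso,sym"
 * with --children).
 */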
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

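/*
 * Elide a column when its filter list (e.g. --dsos, --comms, --symbols)
 * contains exactly one entry: the value is then identical for every hist
 * entry, so it is printed once as a header comment instead.  If that
 * would hide every sort column, the elision is reverted below.
 */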
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them so they show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

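/*
 * Main entry point for --sort/--fields handling: parse the sort keys, add
 * the "parent" key when a custom parent pattern is used, parse the output
 * fields, then link the two lists so that every sort key is also an output
 * column and vice versa.
 */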
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use the default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}