1 #include <sys/mman.h> 2 #include "sort.h" 3 #include "hist.h" 4 #include "comm.h" 5 #include "symbol.h" 6 #include "evsel.h" 7 #include "evlist.h" 8 #include <traceevent/event-parse.h> 9 #include "mem-events.h" 10 11 regex_t parent_regex; 12 const char default_parent_pattern[] = "^sys_|^do_page_fault"; 13 const char *parent_pattern = default_parent_pattern; 14 const char default_sort_order[] = "comm,dso,symbol"; 15 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles"; 16 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked"; 17 const char default_top_sort_order[] = "dso,symbol"; 18 const char default_diff_sort_order[] = "dso,symbol"; 19 const char default_tracepoint_sort_order[] = "trace"; 20 const char *sort_order; 21 const char *field_order; 22 regex_t ignore_callees_regex; 23 int have_ignore_callees = 0; 24 enum sort_mode sort__mode = SORT_MODE__NORMAL; 25 26 /* 27 * Replaces all occurrences of a char used with the: 28 * 29 * -t, --field-separator 30 * 31 * option, that uses a special separator character and don't pad with spaces, 32 * replacing all occurances of this separator in symbol names (and other 33 * output) with a '.' character, that thus it's the only non valid separator. 34 */ 35 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 
36 { 37 int n; 38 va_list ap; 39 40 va_start(ap, fmt); 41 n = vsnprintf(bf, size, fmt, ap); 42 if (symbol_conf.field_sep && n > 0) { 43 char *sep = bf; 44 45 while (1) { 46 sep = strchr(sep, *symbol_conf.field_sep); 47 if (sep == NULL) 48 break; 49 *sep = '.'; 50 } 51 } 52 va_end(ap); 53 54 if (n >= (int)size) 55 return size - 1; 56 return n; 57 } 58 59 static int64_t cmp_null(const void *l, const void *r) 60 { 61 if (!l && !r) 62 return 0; 63 else if (!l) 64 return -1; 65 else 66 return 1; 67 } 68 69 /* --sort pid */ 70 71 static int64_t 72 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right) 73 { 74 return right->thread->tid - left->thread->tid; 75 } 76 77 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf, 78 size_t size, unsigned int width) 79 { 80 const char *comm = thread__comm_str(he->thread); 81 82 width = max(7U, width) - 8; 83 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid, 84 width, width, comm ?: ""); 85 } 86 87 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg) 88 { 89 const struct thread *th = arg; 90 91 if (type != HIST_FILTER__THREAD) 92 return -1; 93 94 return th && he->thread != th; 95 } 96 97 struct sort_entry sort_thread = { 98 .se_header = " Pid:Command", 99 .se_cmp = sort__thread_cmp, 100 .se_snprintf = hist_entry__thread_snprintf, 101 .se_filter = hist_entry__thread_filter, 102 .se_width_idx = HISTC_THREAD, 103 }; 104 105 /* --sort comm */ 106 107 static int64_t 108 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) 109 { 110 /* Compare the addr that should be unique among comm */ 111 return strcmp(comm__str(right->comm), comm__str(left->comm)); 112 } 113 114 static int64_t 115 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) 116 { 117 /* Compare the addr that should be unique among comm */ 118 return strcmp(comm__str(right->comm), comm__str(left->comm)); 119 } 120 121 static int64_t 122 sort__comm_sort(struct hist_entry 
*left, struct hist_entry *right) 123 { 124 return strcmp(comm__str(right->comm), comm__str(left->comm)); 125 } 126 127 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf, 128 size_t size, unsigned int width) 129 { 130 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm)); 131 } 132 133 struct sort_entry sort_comm = { 134 .se_header = "Command", 135 .se_cmp = sort__comm_cmp, 136 .se_collapse = sort__comm_collapse, 137 .se_sort = sort__comm_sort, 138 .se_snprintf = hist_entry__comm_snprintf, 139 .se_filter = hist_entry__thread_filter, 140 .se_width_idx = HISTC_COMM, 141 }; 142 143 /* --sort dso */ 144 145 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 146 { 147 struct dso *dso_l = map_l ? map_l->dso : NULL; 148 struct dso *dso_r = map_r ? map_r->dso : NULL; 149 const char *dso_name_l, *dso_name_r; 150 151 if (!dso_l || !dso_r) 152 return cmp_null(dso_r, dso_l); 153 154 if (verbose) { 155 dso_name_l = dso_l->long_name; 156 dso_name_r = dso_r->long_name; 157 } else { 158 dso_name_l = dso_l->short_name; 159 dso_name_r = dso_r->short_name; 160 } 161 162 return strcmp(dso_name_l, dso_name_r); 163 } 164 165 static int64_t 166 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 167 { 168 return _sort__dso_cmp(right->ms.map, left->ms.map); 169 } 170 171 static int _hist_entry__dso_snprintf(struct map *map, char *bf, 172 size_t size, unsigned int width) 173 { 174 if (map && map->dso) { 175 const char *dso_name = !verbose ? 
map->dso->short_name : 176 map->dso->long_name; 177 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name); 178 } 179 180 return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]"); 181 } 182 183 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf, 184 size_t size, unsigned int width) 185 { 186 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); 187 } 188 189 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg) 190 { 191 const struct dso *dso = arg; 192 193 if (type != HIST_FILTER__DSO) 194 return -1; 195 196 return dso && (!he->ms.map || he->ms.map->dso != dso); 197 } 198 199 struct sort_entry sort_dso = { 200 .se_header = "Shared Object", 201 .se_cmp = sort__dso_cmp, 202 .se_snprintf = hist_entry__dso_snprintf, 203 .se_filter = hist_entry__dso_filter, 204 .se_width_idx = HISTC_DSO, 205 }; 206 207 /* --sort symbol */ 208 209 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip) 210 { 211 return (int64_t)(right_ip - left_ip); 212 } 213 214 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r) 215 { 216 if (!sym_l || !sym_r) 217 return cmp_null(sym_l, sym_r); 218 219 if (sym_l == sym_r) 220 return 0; 221 222 if (sym_l->start != sym_r->start) 223 return (int64_t)(sym_r->start - sym_l->start); 224 225 return (int64_t)(sym_r->end - sym_l->end); 226 } 227 228 static int64_t 229 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 230 { 231 int64_t ret; 232 233 if (!left->ms.sym && !right->ms.sym) 234 return _sort__addr_cmp(left->ip, right->ip); 235 236 /* 237 * comparing symbol address alone is not enough since it's a 238 * relative address within a dso. 
239 */ 240 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) { 241 ret = sort__dso_cmp(left, right); 242 if (ret != 0) 243 return ret; 244 } 245 246 return _sort__sym_cmp(left->ms.sym, right->ms.sym); 247 } 248 249 static int64_t 250 sort__sym_sort(struct hist_entry *left, struct hist_entry *right) 251 { 252 if (!left->ms.sym || !right->ms.sym) 253 return cmp_null(left->ms.sym, right->ms.sym); 254 255 return strcmp(right->ms.sym->name, left->ms.sym->name); 256 } 257 258 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 259 u64 ip, char level, char *bf, size_t size, 260 unsigned int width) 261 { 262 size_t ret = 0; 263 264 if (verbose) { 265 char o = map ? dso__symtab_origin(map->dso) : '!'; 266 ret += repsep_snprintf(bf, size, "%-#*llx %c ", 267 BITS_PER_LONG / 4 + 2, ip, o); 268 } 269 270 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 271 if (sym && map) { 272 if (map->type == MAP__VARIABLE) { 273 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name); 274 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", 275 ip - map->unmap_ip(map, sym->start)); 276 } else { 277 ret += repsep_snprintf(bf + ret, size - ret, "%.*s", 278 width - ret, 279 sym->name); 280 } 281 } else { 282 size_t len = BITS_PER_LONG / 4; 283 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 284 len, ip); 285 } 286 287 return ret; 288 } 289 290 static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, 291 size_t size, unsigned int width) 292 { 293 return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, 294 he->level, bf, size, width); 295 } 296 297 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg) 298 { 299 const char *sym = arg; 300 301 if (type != HIST_FILTER__SYMBOL) 302 return -1; 303 304 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym)); 305 } 306 307 struct sort_entry sort_sym = { 308 .se_header = "Symbol", 309 .se_cmp = sort__sym_cmp, 310 .se_sort = 
sort__sym_sort, 311 .se_snprintf = hist_entry__sym_snprintf, 312 .se_filter = hist_entry__sym_filter, 313 .se_width_idx = HISTC_SYMBOL, 314 }; 315 316 /* --sort srcline */ 317 318 static char *hist_entry__get_srcline(struct hist_entry *he) 319 { 320 struct map *map = he->ms.map; 321 322 if (!map) 323 return SRCLINE_UNKNOWN; 324 325 return get_srcline(map->dso, map__rip_2objdump(map, he->ip), 326 he->ms.sym, true); 327 } 328 329 static int64_t 330 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) 331 { 332 if (!left->srcline) 333 left->srcline = hist_entry__get_srcline(left); 334 if (!right->srcline) 335 right->srcline = hist_entry__get_srcline(right); 336 337 return strcmp(right->srcline, left->srcline); 338 } 339 340 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf, 341 size_t size, unsigned int width) 342 { 343 if (!he->srcline) 344 he->srcline = hist_entry__get_srcline(he); 345 346 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline); 347 } 348 349 struct sort_entry sort_srcline = { 350 .se_header = "Source:Line", 351 .se_cmp = sort__srcline_cmp, 352 .se_snprintf = hist_entry__srcline_snprintf, 353 .se_width_idx = HISTC_SRCLINE, 354 }; 355 356 /* --sort srcline_from */ 357 358 static int64_t 359 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 360 { 361 if (!left->branch_info->srcline_from) { 362 struct map *map = left->branch_info->from.map; 363 if (!map) 364 left->branch_info->srcline_from = SRCLINE_UNKNOWN; 365 else 366 left->branch_info->srcline_from = get_srcline(map->dso, 367 map__rip_2objdump(map, 368 left->branch_info->from.al_addr), 369 left->branch_info->from.sym, true); 370 } 371 if (!right->branch_info->srcline_from) { 372 struct map *map = right->branch_info->from.map; 373 if (!map) 374 right->branch_info->srcline_from = SRCLINE_UNKNOWN; 375 else 376 right->branch_info->srcline_from = get_srcline(map->dso, 377 map__rip_2objdump(map, 378 right->branch_info->from.al_addr), 379 
right->branch_info->from.sym, true); 380 } 381 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 382 } 383 384 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 385 size_t size, unsigned int width) 386 { 387 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 388 } 389 390 struct sort_entry sort_srcline_from = { 391 .se_header = "From Source:Line", 392 .se_cmp = sort__srcline_from_cmp, 393 .se_snprintf = hist_entry__srcline_from_snprintf, 394 .se_width_idx = HISTC_SRCLINE_FROM, 395 }; 396 397 /* --sort srcline_to */ 398 399 static int64_t 400 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 401 { 402 if (!left->branch_info->srcline_to) { 403 struct map *map = left->branch_info->to.map; 404 if (!map) 405 left->branch_info->srcline_to = SRCLINE_UNKNOWN; 406 else 407 left->branch_info->srcline_to = get_srcline(map->dso, 408 map__rip_2objdump(map, 409 left->branch_info->to.al_addr), 410 left->branch_info->from.sym, true); 411 } 412 if (!right->branch_info->srcline_to) { 413 struct map *map = right->branch_info->to.map; 414 if (!map) 415 right->branch_info->srcline_to = SRCLINE_UNKNOWN; 416 else 417 right->branch_info->srcline_to = get_srcline(map->dso, 418 map__rip_2objdump(map, 419 right->branch_info->to.al_addr), 420 right->branch_info->to.sym, true); 421 } 422 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 423 } 424 425 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 426 size_t size, unsigned int width) 427 { 428 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 429 } 430 431 struct sort_entry sort_srcline_to = { 432 .se_header = "To Source:Line", 433 .se_cmp = sort__srcline_to_cmp, 434 .se_snprintf = hist_entry__srcline_to_snprintf, 435 .se_width_idx = HISTC_SRCLINE_TO, 436 }; 437 438 /* --sort srcfile */ 439 440 static char no_srcfile[1]; 441 442 
static char *hist_entry__get_srcfile(struct hist_entry *e) 443 { 444 char *sf, *p; 445 struct map *map = e->ms.map; 446 447 if (!map) 448 return no_srcfile; 449 450 sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), 451 e->ms.sym, false, true); 452 if (!strcmp(sf, SRCLINE_UNKNOWN)) 453 return no_srcfile; 454 p = strchr(sf, ':'); 455 if (p && *sf) { 456 *p = 0; 457 return sf; 458 } 459 free(sf); 460 return no_srcfile; 461 } 462 463 static int64_t 464 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right) 465 { 466 if (!left->srcfile) 467 left->srcfile = hist_entry__get_srcfile(left); 468 if (!right->srcfile) 469 right->srcfile = hist_entry__get_srcfile(right); 470 471 return strcmp(right->srcfile, left->srcfile); 472 } 473 474 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf, 475 size_t size, unsigned int width) 476 { 477 if (!he->srcfile) 478 he->srcfile = hist_entry__get_srcfile(he); 479 480 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile); 481 } 482 483 struct sort_entry sort_srcfile = { 484 .se_header = "Source File", 485 .se_cmp = sort__srcfile_cmp, 486 .se_snprintf = hist_entry__srcfile_snprintf, 487 .se_width_idx = HISTC_SRCFILE, 488 }; 489 490 /* --sort parent */ 491 492 static int64_t 493 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right) 494 { 495 struct symbol *sym_l = left->parent; 496 struct symbol *sym_r = right->parent; 497 498 if (!sym_l || !sym_r) 499 return cmp_null(sym_l, sym_r); 500 501 return strcmp(sym_r->name, sym_l->name); 502 } 503 504 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf, 505 size_t size, unsigned int width) 506 { 507 return repsep_snprintf(bf, size, "%-*.*s", width, width, 508 he->parent ? 
he->parent->name : "[other]"); 509 } 510 511 struct sort_entry sort_parent = { 512 .se_header = "Parent symbol", 513 .se_cmp = sort__parent_cmp, 514 .se_snprintf = hist_entry__parent_snprintf, 515 .se_width_idx = HISTC_PARENT, 516 }; 517 518 /* --sort cpu */ 519 520 static int64_t 521 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 522 { 523 return right->cpu - left->cpu; 524 } 525 526 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 527 size_t size, unsigned int width) 528 { 529 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 530 } 531 532 struct sort_entry sort_cpu = { 533 .se_header = "CPU", 534 .se_cmp = sort__cpu_cmp, 535 .se_snprintf = hist_entry__cpu_snprintf, 536 .se_width_idx = HISTC_CPU, 537 }; 538 539 /* --sort socket */ 540 541 static int64_t 542 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right) 543 { 544 return right->socket - left->socket; 545 } 546 547 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf, 548 size_t size, unsigned int width) 549 { 550 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket); 551 } 552 553 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg) 554 { 555 int sk = *(const int *)arg; 556 557 if (type != HIST_FILTER__SOCKET) 558 return -1; 559 560 return sk >= 0 && he->socket != sk; 561 } 562 563 struct sort_entry sort_socket = { 564 .se_header = "Socket", 565 .se_cmp = sort__socket_cmp, 566 .se_snprintf = hist_entry__socket_snprintf, 567 .se_filter = hist_entry__socket_filter, 568 .se_width_idx = HISTC_SOCKET, 569 }; 570 571 /* --sort trace */ 572 573 static char *get_trace_output(struct hist_entry *he) 574 { 575 struct trace_seq seq; 576 struct perf_evsel *evsel; 577 struct pevent_record rec = { 578 .data = he->raw_data, 579 .size = he->raw_size, 580 }; 581 582 evsel = hists_to_evsel(he->hists); 583 584 trace_seq_init(&seq); 585 if (symbol_conf.raw_trace) { 586 pevent_print_fields(&seq, 
he->raw_data, he->raw_size, 587 evsel->tp_format); 588 } else { 589 pevent_event_info(&seq, evsel->tp_format, &rec); 590 } 591 /* 592 * Trim the buffer, it starts at 4KB and we're not going to 593 * add anything more to this buffer. 594 */ 595 return realloc(seq.buffer, seq.len + 1); 596 } 597 598 static int64_t 599 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right) 600 { 601 struct perf_evsel *evsel; 602 603 evsel = hists_to_evsel(left->hists); 604 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 605 return 0; 606 607 if (left->trace_output == NULL) 608 left->trace_output = get_trace_output(left); 609 if (right->trace_output == NULL) 610 right->trace_output = get_trace_output(right); 611 612 return strcmp(right->trace_output, left->trace_output); 613 } 614 615 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf, 616 size_t size, unsigned int width) 617 { 618 struct perf_evsel *evsel; 619 620 evsel = hists_to_evsel(he->hists); 621 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 622 return scnprintf(bf, size, "%-.*s", width, "N/A"); 623 624 if (he->trace_output == NULL) 625 he->trace_output = get_trace_output(he); 626 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output); 627 } 628 629 struct sort_entry sort_trace = { 630 .se_header = "Trace output", 631 .se_cmp = sort__trace_cmp, 632 .se_snprintf = hist_entry__trace_snprintf, 633 .se_width_idx = HISTC_TRACE, 634 }; 635 636 /* sort keys for branch stacks */ 637 638 static int64_t 639 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 640 { 641 if (!left->branch_info || !right->branch_info) 642 return cmp_null(left->branch_info, right->branch_info); 643 644 return _sort__dso_cmp(left->branch_info->from.map, 645 right->branch_info->from.map); 646 } 647 648 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 649 size_t size, unsigned int width) 650 { 651 if (he->branch_info) 652 return _hist_entry__dso_snprintf(he->branch_info->from.map, 
653 bf, size, width); 654 else 655 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 656 } 657 658 static int hist_entry__dso_from_filter(struct hist_entry *he, int type, 659 const void *arg) 660 { 661 const struct dso *dso = arg; 662 663 if (type != HIST_FILTER__DSO) 664 return -1; 665 666 return dso && (!he->branch_info || !he->branch_info->from.map || 667 he->branch_info->from.map->dso != dso); 668 } 669 670 static int64_t 671 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 672 { 673 if (!left->branch_info || !right->branch_info) 674 return cmp_null(left->branch_info, right->branch_info); 675 676 return _sort__dso_cmp(left->branch_info->to.map, 677 right->branch_info->to.map); 678 } 679 680 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 681 size_t size, unsigned int width) 682 { 683 if (he->branch_info) 684 return _hist_entry__dso_snprintf(he->branch_info->to.map, 685 bf, size, width); 686 else 687 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 688 } 689 690 static int hist_entry__dso_to_filter(struct hist_entry *he, int type, 691 const void *arg) 692 { 693 const struct dso *dso = arg; 694 695 if (type != HIST_FILTER__DSO) 696 return -1; 697 698 return dso && (!he->branch_info || !he->branch_info->to.map || 699 he->branch_info->to.map->dso != dso); 700 } 701 702 static int64_t 703 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 704 { 705 struct addr_map_symbol *from_l = &left->branch_info->from; 706 struct addr_map_symbol *from_r = &right->branch_info->from; 707 708 if (!left->branch_info || !right->branch_info) 709 return cmp_null(left->branch_info, right->branch_info); 710 711 from_l = &left->branch_info->from; 712 from_r = &right->branch_info->from; 713 714 if (!from_l->sym && !from_r->sym) 715 return _sort__addr_cmp(from_l->addr, from_r->addr); 716 717 return _sort__sym_cmp(from_l->sym, from_r->sym); 718 } 719 720 static int64_t 721 sort__sym_to_cmp(struct 
hist_entry *left, struct hist_entry *right) 722 { 723 struct addr_map_symbol *to_l, *to_r; 724 725 if (!left->branch_info || !right->branch_info) 726 return cmp_null(left->branch_info, right->branch_info); 727 728 to_l = &left->branch_info->to; 729 to_r = &right->branch_info->to; 730 731 if (!to_l->sym && !to_r->sym) 732 return _sort__addr_cmp(to_l->addr, to_r->addr); 733 734 return _sort__sym_cmp(to_l->sym, to_r->sym); 735 } 736 737 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 738 size_t size, unsigned int width) 739 { 740 if (he->branch_info) { 741 struct addr_map_symbol *from = &he->branch_info->from; 742 743 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 744 he->level, bf, size, width); 745 } 746 747 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 748 } 749 750 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 751 size_t size, unsigned int width) 752 { 753 if (he->branch_info) { 754 struct addr_map_symbol *to = &he->branch_info->to; 755 756 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 757 he->level, bf, size, width); 758 } 759 760 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A"); 761 } 762 763 static int hist_entry__sym_from_filter(struct hist_entry *he, int type, 764 const void *arg) 765 { 766 const char *sym = arg; 767 768 if (type != HIST_FILTER__SYMBOL) 769 return -1; 770 771 return sym && !(he->branch_info && he->branch_info->from.sym && 772 strstr(he->branch_info->from.sym->name, sym)); 773 } 774 775 static int hist_entry__sym_to_filter(struct hist_entry *he, int type, 776 const void *arg) 777 { 778 const char *sym = arg; 779 780 if (type != HIST_FILTER__SYMBOL) 781 return -1; 782 783 return sym && !(he->branch_info && he->branch_info->to.sym && 784 strstr(he->branch_info->to.sym->name, sym)); 785 } 786 787 struct sort_entry sort_dso_from = { 788 .se_header = "Source Shared Object", 789 .se_cmp = sort__dso_from_cmp, 790 .se_snprintf = 
hist_entry__dso_from_snprintf, 791 .se_filter = hist_entry__dso_from_filter, 792 .se_width_idx = HISTC_DSO_FROM, 793 }; 794 795 struct sort_entry sort_dso_to = { 796 .se_header = "Target Shared Object", 797 .se_cmp = sort__dso_to_cmp, 798 .se_snprintf = hist_entry__dso_to_snprintf, 799 .se_filter = hist_entry__dso_to_filter, 800 .se_width_idx = HISTC_DSO_TO, 801 }; 802 803 struct sort_entry sort_sym_from = { 804 .se_header = "Source Symbol", 805 .se_cmp = sort__sym_from_cmp, 806 .se_snprintf = hist_entry__sym_from_snprintf, 807 .se_filter = hist_entry__sym_from_filter, 808 .se_width_idx = HISTC_SYMBOL_FROM, 809 }; 810 811 struct sort_entry sort_sym_to = { 812 .se_header = "Target Symbol", 813 .se_cmp = sort__sym_to_cmp, 814 .se_snprintf = hist_entry__sym_to_snprintf, 815 .se_filter = hist_entry__sym_to_filter, 816 .se_width_idx = HISTC_SYMBOL_TO, 817 }; 818 819 static int64_t 820 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right) 821 { 822 unsigned char mp, p; 823 824 if (!left->branch_info || !right->branch_info) 825 return cmp_null(left->branch_info, right->branch_info); 826 827 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred; 828 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted; 829 return mp || p; 830 } 831 832 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, 833 size_t size, unsigned int width){ 834 static const char *out = "N/A"; 835 836 if (he->branch_info) { 837 if (he->branch_info->flags.predicted) 838 out = "N"; 839 else if (he->branch_info->flags.mispred) 840 out = "Y"; 841 } 842 843 return repsep_snprintf(bf, size, "%-*.*s", width, width, out); 844 } 845 846 static int64_t 847 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) 848 { 849 return left->branch_info->flags.cycles - 850 right->branch_info->flags.cycles; 851 } 852 853 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, 854 size_t size, unsigned int width) 
855 { 856 if (he->branch_info->flags.cycles == 0) 857 return repsep_snprintf(bf, size, "%-*s", width, "-"); 858 return repsep_snprintf(bf, size, "%-*hd", width, 859 he->branch_info->flags.cycles); 860 } 861 862 struct sort_entry sort_cycles = { 863 .se_header = "Basic Block Cycles", 864 .se_cmp = sort__cycles_cmp, 865 .se_snprintf = hist_entry__cycles_snprintf, 866 .se_width_idx = HISTC_CYCLES, 867 }; 868 869 /* --sort daddr_sym */ 870 static int64_t 871 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right) 872 { 873 uint64_t l = 0, r = 0; 874 875 if (left->mem_info) 876 l = left->mem_info->daddr.addr; 877 if (right->mem_info) 878 r = right->mem_info->daddr.addr; 879 880 return (int64_t)(r - l); 881 } 882 883 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf, 884 size_t size, unsigned int width) 885 { 886 uint64_t addr = 0; 887 struct map *map = NULL; 888 struct symbol *sym = NULL; 889 890 if (he->mem_info) { 891 addr = he->mem_info->daddr.addr; 892 map = he->mem_info->daddr.map; 893 sym = he->mem_info->daddr.sym; 894 } 895 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 896 width); 897 } 898 899 static int64_t 900 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right) 901 { 902 uint64_t l = 0, r = 0; 903 904 if (left->mem_info) 905 l = left->mem_info->iaddr.addr; 906 if (right->mem_info) 907 r = right->mem_info->iaddr.addr; 908 909 return (int64_t)(r - l); 910 } 911 912 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf, 913 size_t size, unsigned int width) 914 { 915 uint64_t addr = 0; 916 struct map *map = NULL; 917 struct symbol *sym = NULL; 918 919 if (he->mem_info) { 920 addr = he->mem_info->iaddr.addr; 921 map = he->mem_info->iaddr.map; 922 sym = he->mem_info->iaddr.sym; 923 } 924 return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, 925 width); 926 } 927 928 static int64_t 929 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right) 930 { 931 
struct map *map_l = NULL; 932 struct map *map_r = NULL; 933 934 if (left->mem_info) 935 map_l = left->mem_info->daddr.map; 936 if (right->mem_info) 937 map_r = right->mem_info->daddr.map; 938 939 return _sort__dso_cmp(map_l, map_r); 940 } 941 942 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf, 943 size_t size, unsigned int width) 944 { 945 struct map *map = NULL; 946 947 if (he->mem_info) 948 map = he->mem_info->daddr.map; 949 950 return _hist_entry__dso_snprintf(map, bf, size, width); 951 } 952 953 static int64_t 954 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right) 955 { 956 union perf_mem_data_src data_src_l; 957 union perf_mem_data_src data_src_r; 958 959 if (left->mem_info) 960 data_src_l = left->mem_info->data_src; 961 else 962 data_src_l.mem_lock = PERF_MEM_LOCK_NA; 963 964 if (right->mem_info) 965 data_src_r = right->mem_info->data_src; 966 else 967 data_src_r.mem_lock = PERF_MEM_LOCK_NA; 968 969 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock); 970 } 971 972 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf, 973 size_t size, unsigned int width) 974 { 975 char out[10]; 976 977 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info); 978 return repsep_snprintf(bf, size, "%.*s", width, out); 979 } 980 981 static int64_t 982 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right) 983 { 984 union perf_mem_data_src data_src_l; 985 union perf_mem_data_src data_src_r; 986 987 if (left->mem_info) 988 data_src_l = left->mem_info->data_src; 989 else 990 data_src_l.mem_dtlb = PERF_MEM_TLB_NA; 991 992 if (right->mem_info) 993 data_src_r = right->mem_info->data_src; 994 else 995 data_src_r.mem_dtlb = PERF_MEM_TLB_NA; 996 997 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb); 998 } 999 1000 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf, 1001 size_t size, unsigned int width) 1002 { 1003 char out[64]; 1004 1005 perf_mem__tlb_scnprintf(out, sizeof(out), 
he->mem_info); 1006 return repsep_snprintf(bf, size, "%-*s", width, out); 1007 } 1008 1009 static int64_t 1010 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right) 1011 { 1012 union perf_mem_data_src data_src_l; 1013 union perf_mem_data_src data_src_r; 1014 1015 if (left->mem_info) 1016 data_src_l = left->mem_info->data_src; 1017 else 1018 data_src_l.mem_lvl = PERF_MEM_LVL_NA; 1019 1020 if (right->mem_info) 1021 data_src_r = right->mem_info->data_src; 1022 else 1023 data_src_r.mem_lvl = PERF_MEM_LVL_NA; 1024 1025 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl); 1026 } 1027 1028 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf, 1029 size_t size, unsigned int width) 1030 { 1031 char out[64]; 1032 1033 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info); 1034 return repsep_snprintf(bf, size, "%-*s", width, out); 1035 } 1036 1037 static int64_t 1038 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right) 1039 { 1040 union perf_mem_data_src data_src_l; 1041 union perf_mem_data_src data_src_r; 1042 1043 if (left->mem_info) 1044 data_src_l = left->mem_info->data_src; 1045 else 1046 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA; 1047 1048 if (right->mem_info) 1049 data_src_r = right->mem_info->data_src; 1050 else 1051 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA; 1052 1053 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop); 1054 } 1055 1056 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf, 1057 size_t size, unsigned int width) 1058 { 1059 char out[64]; 1060 1061 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info); 1062 return repsep_snprintf(bf, size, "%-*s", width, out); 1063 } 1064 1065 static int64_t 1066 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right) 1067 { 1068 u64 l, r; 1069 struct map *l_map, *r_map; 1070 1071 if (!left->mem_info) return -1; 1072 if (!right->mem_info) return 1; 1073 1074 /* group event types together */ 1075 if (left->cpumode > right->cpumode) 
return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	/* a mapped data address sorts before an unmapped one */
	if (!l_map) return -1;
	if (!r_map) return 1;

	/* order by the backing file identity: maj/min dev, inode, generation */
	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

/*
 * Print the cache line of the data address.  The symbol level char is
 * overridden to 's' for shared file-backed mappings and 'X' when the
 * address has no map at all.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		     map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

/* Mean weight per sample for this entry; 0 when it holds no events. */
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

/* Global weight: accumulated (not averaged) weight of the entry. */
static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

/* Equality-only comparator on the transaction-abort flag. */
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return
left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

/* Equality-only comparator on the in-transaction flag. */
static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

/* Append str at p and return the new end-of-string position. */
static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

/*
 * PERF_TXN_* flag -> printable name.  skip_for_len marks flags that are
 * mutually exclusive with another one, so only one of the pair counts
 * toward the column width computed in hist_entry__transaction_len().
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};

/* Worst-case printed width of the transaction column. */
int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	/* a non-zero value with neither SYNC nor ASYNC set is unexpected */
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};

/* Maps a --sort token name to its sort_entry implementation. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;	/* already added to the sort list */
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
};

#undef DIM

/* branch-stack dimensions are indexed relative to __SORT_BRANCH_STACK */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

/* memory-mode dimensions are indexed relative to __SORT_MEMORY_MODE */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
};

#undef DIM

/* Maps an output-field token name to its perf_hpp_fmt implementation. */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS,
"overhead_sys"), 1468 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1469 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1470 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1471 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1472 DIM(PERF_HPP__SAMPLES, "sample"), 1473 DIM(PERF_HPP__PERIOD, "period"), 1474 }; 1475 1476 #undef DIM 1477 1478 struct hpp_sort_entry { 1479 struct perf_hpp_fmt hpp; 1480 struct sort_entry *se; 1481 }; 1482 1483 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists) 1484 { 1485 struct hpp_sort_entry *hse; 1486 1487 if (!perf_hpp__is_sort_entry(fmt)) 1488 return; 1489 1490 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1491 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name)); 1492 } 1493 1494 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1495 struct hists *hists) 1496 { 1497 struct hpp_sort_entry *hse; 1498 size_t len = fmt->user_len; 1499 1500 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1501 1502 if (!len) 1503 len = hists__col_len(hists, hse->se->se_width_idx); 1504 1505 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name); 1506 } 1507 1508 static int __sort__hpp_width(struct perf_hpp_fmt *fmt, 1509 struct perf_hpp *hpp __maybe_unused, 1510 struct hists *hists) 1511 { 1512 struct hpp_sort_entry *hse; 1513 size_t len = fmt->user_len; 1514 1515 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1516 1517 if (!len) 1518 len = hists__col_len(hists, hse->se->se_width_idx); 1519 1520 return len; 1521 } 1522 1523 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 1524 struct hist_entry *he) 1525 { 1526 struct hpp_sort_entry *hse; 1527 size_t len = fmt->user_len; 1528 1529 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1530 1531 if (!len) 1532 len = hists__col_len(he->hists, hse->se->se_width_idx); 1533 1534 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1535 } 1536 1537 static 
int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1538 struct hist_entry *a, struct hist_entry *b) 1539 { 1540 struct hpp_sort_entry *hse; 1541 1542 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1543 return hse->se->se_cmp(a, b); 1544 } 1545 1546 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1547 struct hist_entry *a, struct hist_entry *b) 1548 { 1549 struct hpp_sort_entry *hse; 1550 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1551 1552 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1553 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1554 return collapse_fn(a, b); 1555 } 1556 1557 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1558 struct hist_entry *a, struct hist_entry *b) 1559 { 1560 struct hpp_sort_entry *hse; 1561 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1562 1563 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1564 sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1565 return sort_fn(a, b); 1566 } 1567 1568 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format) 1569 { 1570 return format->header == __sort__hpp_header; 1571 } 1572 1573 #define MK_SORT_ENTRY_CHK(key) \ 1574 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \ 1575 { \ 1576 struct hpp_sort_entry *hse; \ 1577 \ 1578 if (!perf_hpp__is_sort_entry(fmt)) \ 1579 return false; \ 1580 \ 1581 hse = container_of(fmt, struct hpp_sort_entry, hpp); \ 1582 return hse->se == &sort_ ## key ; \ 1583 } 1584 1585 MK_SORT_ENTRY_CHK(trace) 1586 MK_SORT_ENTRY_CHK(srcline) 1587 MK_SORT_ENTRY_CHK(srcfile) 1588 MK_SORT_ENTRY_CHK(thread) 1589 MK_SORT_ENTRY_CHK(comm) 1590 MK_SORT_ENTRY_CHK(dso) 1591 MK_SORT_ENTRY_CHK(sym) 1592 1593 1594 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1595 { 1596 struct hpp_sort_entry *hse_a; 1597 struct hpp_sort_entry *hse_b; 1598 1599 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1600 return false; 1601 1602 hse_a = container_of(a, 
struct hpp_sort_entry, hpp); 1603 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1604 1605 return hse_a->se == hse_b->se; 1606 } 1607 1608 static void hse_free(struct perf_hpp_fmt *fmt) 1609 { 1610 struct hpp_sort_entry *hse; 1611 1612 hse = container_of(fmt, struct hpp_sort_entry, hpp); 1613 free(hse); 1614 } 1615 1616 static struct hpp_sort_entry * 1617 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level) 1618 { 1619 struct hpp_sort_entry *hse; 1620 1621 hse = malloc(sizeof(*hse)); 1622 if (hse == NULL) { 1623 pr_err("Memory allocation failed\n"); 1624 return NULL; 1625 } 1626 1627 hse->se = sd->entry; 1628 hse->hpp.name = sd->entry->se_header; 1629 hse->hpp.header = __sort__hpp_header; 1630 hse->hpp.width = __sort__hpp_width; 1631 hse->hpp.entry = __sort__hpp_entry; 1632 hse->hpp.color = NULL; 1633 1634 hse->hpp.cmp = __sort__hpp_cmp; 1635 hse->hpp.collapse = __sort__hpp_collapse; 1636 hse->hpp.sort = __sort__hpp_sort; 1637 hse->hpp.equal = __sort__hpp_equal; 1638 hse->hpp.free = hse_free; 1639 1640 INIT_LIST_HEAD(&hse->hpp.list); 1641 INIT_LIST_HEAD(&hse->hpp.sort_list); 1642 hse->hpp.elide = false; 1643 hse->hpp.len = 0; 1644 hse->hpp.user_len = 0; 1645 hse->hpp.level = level; 1646 1647 return hse; 1648 } 1649 1650 static void hpp_free(struct perf_hpp_fmt *fmt) 1651 { 1652 free(fmt); 1653 } 1654 1655 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, 1656 int level) 1657 { 1658 struct perf_hpp_fmt *fmt; 1659 1660 fmt = memdup(hd->fmt, sizeof(*fmt)); 1661 if (fmt) { 1662 INIT_LIST_HEAD(&fmt->list); 1663 INIT_LIST_HEAD(&fmt->sort_list); 1664 fmt->free = hpp_free; 1665 fmt->level = level; 1666 } 1667 1668 return fmt; 1669 } 1670 1671 int hist_entry__filter(struct hist_entry *he, int type, const void *arg) 1672 { 1673 struct perf_hpp_fmt *fmt; 1674 struct hpp_sort_entry *hse; 1675 int ret = -1; 1676 int r; 1677 1678 perf_hpp_list__for_each_format(he->hpp_list, fmt) { 1679 if (!perf_hpp__is_sort_entry(fmt)) 1680 
continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

/* hpp format backed by one field of a tracepoint event. */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;	/* max printed value width seen so far */
	bool raw_trace;		/* print raw field data, skip pretty-printing */
};

/*
 * Column width: the max of the field name, the widest pretty-printed
 * value seen, and (for non-strings) the hex representation of the field.
 */
static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

/*
 * Scan the space-separated pretty-print output for "<name>=<value>" and
 * grow dynamic_len to fit the value if it is the widest seen so far.
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

/* A dynamic column only applies to the evsel it was created from. */
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

/*
 * Print the field's value: either the parsed "<name> <value>" token from
 * the pretty-printed trace output, or (raw mode / field not found) the
 * raw field data via libtraceevent.  The printed string is heap-owned
 * in both paths (strndup or trace_seq buffer) and freed before return.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

/*
 * Compare the raw field bytes of two entries.  The special call with
 * b == NULL is used to feed entry a into update_dynamic_len() only.
 * FIELD_IS_DYNAMIC fields store "offset | (size << 16)" inline.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool
perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt) 1933 { 1934 return fmt->cmp == __sort__hde_cmp; 1935 } 1936 1937 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1938 { 1939 struct hpp_dynamic_entry *hde_a; 1940 struct hpp_dynamic_entry *hde_b; 1941 1942 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b)) 1943 return false; 1944 1945 hde_a = container_of(a, struct hpp_dynamic_entry, hpp); 1946 hde_b = container_of(b, struct hpp_dynamic_entry, hpp); 1947 1948 return hde_a->field == hde_b->field; 1949 } 1950 1951 static void hde_free(struct perf_hpp_fmt *fmt) 1952 { 1953 struct hpp_dynamic_entry *hde; 1954 1955 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 1956 free(hde); 1957 } 1958 1959 static struct hpp_dynamic_entry * 1960 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field, 1961 int level) 1962 { 1963 struct hpp_dynamic_entry *hde; 1964 1965 hde = malloc(sizeof(*hde)); 1966 if (hde == NULL) { 1967 pr_debug("Memory allocation failed\n"); 1968 return NULL; 1969 } 1970 1971 hde->evsel = evsel; 1972 hde->field = field; 1973 hde->dynamic_len = 0; 1974 1975 hde->hpp.name = field->name; 1976 hde->hpp.header = __sort__hde_header; 1977 hde->hpp.width = __sort__hde_width; 1978 hde->hpp.entry = __sort__hde_entry; 1979 hde->hpp.color = NULL; 1980 1981 hde->hpp.cmp = __sort__hde_cmp; 1982 hde->hpp.collapse = __sort__hde_cmp; 1983 hde->hpp.sort = __sort__hde_cmp; 1984 hde->hpp.equal = __sort__hde_equal; 1985 hde->hpp.free = hde_free; 1986 1987 INIT_LIST_HEAD(&hde->hpp.list); 1988 INIT_LIST_HEAD(&hde->hpp.sort_list); 1989 hde->hpp.elide = false; 1990 hde->hpp.len = 0; 1991 hde->hpp.user_len = 0; 1992 hde->hpp.level = level; 1993 1994 return hde; 1995 } 1996 1997 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt) 1998 { 1999 struct perf_hpp_fmt *new_fmt = NULL; 2000 2001 if (perf_hpp__is_sort_entry(fmt)) { 2002 struct hpp_sort_entry *hse, *new_hse; 2003 2004 hse = 
container_of(fmt, struct hpp_sort_entry, hpp); 2005 new_hse = memdup(hse, sizeof(*hse)); 2006 if (new_hse) 2007 new_fmt = &new_hse->hpp; 2008 } else if (perf_hpp__is_dynamic_entry(fmt)) { 2009 struct hpp_dynamic_entry *hde, *new_hde; 2010 2011 hde = container_of(fmt, struct hpp_dynamic_entry, hpp); 2012 new_hde = memdup(hde, sizeof(*hde)); 2013 if (new_hde) 2014 new_fmt = &new_hde->hpp; 2015 } else { 2016 new_fmt = memdup(fmt, sizeof(*fmt)); 2017 } 2018 2019 INIT_LIST_HEAD(&new_fmt->list); 2020 INIT_LIST_HEAD(&new_fmt->sort_list); 2021 2022 return new_fmt; 2023 } 2024 2025 static int parse_field_name(char *str, char **event, char **field, char **opt) 2026 { 2027 char *event_name, *field_name, *opt_name; 2028 2029 event_name = str; 2030 field_name = strchr(str, '.'); 2031 2032 if (field_name) { 2033 *field_name++ = '\0'; 2034 } else { 2035 event_name = NULL; 2036 field_name = str; 2037 } 2038 2039 opt_name = strchr(field_name, '/'); 2040 if (opt_name) 2041 *opt_name++ = '\0'; 2042 2043 *event = event_name; 2044 *field = field_name; 2045 *opt = opt_name; 2046 2047 return 0; 2048 } 2049 2050 /* find match evsel using a given event name. The event name can be: 2051 * 1. '%' + event index (e.g. '%1' for first event) 2052 * 2. full event name (e.g. sched:sched_switch) 2053 * 3. 
partial event name (should not contain ':') 2054 */ 2055 static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name) 2056 { 2057 struct perf_evsel *evsel = NULL; 2058 struct perf_evsel *pos; 2059 bool full_name; 2060 2061 /* case 1 */ 2062 if (event_name[0] == '%') { 2063 int nr = strtol(event_name+1, NULL, 0); 2064 2065 if (nr > evlist->nr_entries) 2066 return NULL; 2067 2068 evsel = perf_evlist__first(evlist); 2069 while (--nr > 0) 2070 evsel = perf_evsel__next(evsel); 2071 2072 return evsel; 2073 } 2074 2075 full_name = !!strchr(event_name, ':'); 2076 evlist__for_each_entry(evlist, pos) { 2077 /* case 2 */ 2078 if (full_name && !strcmp(pos->name, event_name)) 2079 return pos; 2080 /* case 3 */ 2081 if (!full_name && strstr(pos->name, event_name)) { 2082 if (evsel) { 2083 pr_debug("'%s' event is ambiguous: it can be %s or %s\n", 2084 event_name, evsel->name, pos->name); 2085 return NULL; 2086 } 2087 evsel = pos; 2088 } 2089 } 2090 2091 return evsel; 2092 } 2093 2094 static int __dynamic_dimension__add(struct perf_evsel *evsel, 2095 struct format_field *field, 2096 bool raw_trace, int level) 2097 { 2098 struct hpp_dynamic_entry *hde; 2099 2100 hde = __alloc_dynamic_entry(evsel, field, level); 2101 if (hde == NULL) 2102 return -ENOMEM; 2103 2104 hde->raw_trace = raw_trace; 2105 2106 perf_hpp__register_sort_field(&hde->hpp); 2107 return 0; 2108 } 2109 2110 static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level) 2111 { 2112 int ret; 2113 struct format_field *field; 2114 2115 field = evsel->tp_format->format.fields; 2116 while (field) { 2117 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2118 if (ret < 0) 2119 return ret; 2120 2121 field = field->next; 2122 } 2123 return 0; 2124 } 2125 2126 static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace, 2127 int level) 2128 { 2129 int ret; 2130 struct perf_evsel *evsel; 2131 2132 evlist__for_each_entry(evlist, evsel) { 2133 if 
(evsel->attr.type != PERF_TYPE_TRACEPOINT) 2134 continue; 2135 2136 ret = add_evsel_fields(evsel, raw_trace, level); 2137 if (ret < 0) 2138 return ret; 2139 } 2140 return 0; 2141 } 2142 2143 static int add_all_matching_fields(struct perf_evlist *evlist, 2144 char *field_name, bool raw_trace, int level) 2145 { 2146 int ret = -ESRCH; 2147 struct perf_evsel *evsel; 2148 struct format_field *field; 2149 2150 evlist__for_each_entry(evlist, evsel) { 2151 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 2152 continue; 2153 2154 field = pevent_find_any_field(evsel->tp_format, field_name); 2155 if (field == NULL) 2156 continue; 2157 2158 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2159 if (ret < 0) 2160 break; 2161 } 2162 return ret; 2163 } 2164 2165 static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok, 2166 int level) 2167 { 2168 char *str, *event_name, *field_name, *opt_name; 2169 struct perf_evsel *evsel; 2170 struct format_field *field; 2171 bool raw_trace = symbol_conf.raw_trace; 2172 int ret = 0; 2173 2174 if (evlist == NULL) 2175 return -ENOENT; 2176 2177 str = strdup(tok); 2178 if (str == NULL) 2179 return -ENOMEM; 2180 2181 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) { 2182 ret = -EINVAL; 2183 goto out; 2184 } 2185 2186 if (opt_name) { 2187 if (strcmp(opt_name, "raw")) { 2188 pr_debug("unsupported field option %s\n", opt_name); 2189 ret = -EINVAL; 2190 goto out; 2191 } 2192 raw_trace = true; 2193 } 2194 2195 if (!strcmp(field_name, "trace_fields")) { 2196 ret = add_all_dynamic_fields(evlist, raw_trace, level); 2197 goto out; 2198 } 2199 2200 if (event_name == NULL) { 2201 ret = add_all_matching_fields(evlist, field_name, raw_trace, level); 2202 goto out; 2203 } 2204 2205 evsel = find_evsel(evlist, event_name); 2206 if (evsel == NULL) { 2207 pr_debug("Cannot find event: %s\n", event_name); 2208 ret = -ENOENT; 2209 goto out; 2210 } 2211 2212 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) { 2213 pr_debug("%s 
is not a tracepoint event\n", event_name); 2214 ret = -EINVAL; 2215 goto out; 2216 } 2217 2218 if (!strcmp(field_name, "*")) { 2219 ret = add_evsel_fields(evsel, raw_trace, level); 2220 } else { 2221 field = pevent_find_any_field(evsel->tp_format, field_name); 2222 if (field == NULL) { 2223 pr_debug("Cannot find event field for %s.%s\n", 2224 event_name, field_name); 2225 return -ENOENT; 2226 } 2227 2228 ret = __dynamic_dimension__add(evsel, field, raw_trace, level); 2229 } 2230 2231 out: 2232 free(str); 2233 return ret; 2234 } 2235 2236 static int __sort_dimension__add(struct sort_dimension *sd, 2237 struct perf_hpp_list *list, 2238 int level) 2239 { 2240 if (sd->taken) 2241 return 0; 2242 2243 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0) 2244 return -1; 2245 2246 if (sd->entry->se_collapse) 2247 list->need_collapse = 1; 2248 2249 sd->taken = 1; 2250 2251 return 0; 2252 } 2253 2254 static int __hpp_dimension__add(struct hpp_dimension *hd, 2255 struct perf_hpp_list *list, 2256 int level) 2257 { 2258 struct perf_hpp_fmt *fmt; 2259 2260 if (hd->taken) 2261 return 0; 2262 2263 fmt = __hpp_dimension__alloc_hpp(hd, level); 2264 if (!fmt) 2265 return -1; 2266 2267 hd->taken = 1; 2268 perf_hpp_list__register_sort_field(list, fmt); 2269 return 0; 2270 } 2271 2272 static int __sort_dimension__add_output(struct perf_hpp_list *list, 2273 struct sort_dimension *sd) 2274 { 2275 if (sd->taken) 2276 return 0; 2277 2278 if (__sort_dimension__add_hpp_output(sd, list) < 0) 2279 return -1; 2280 2281 sd->taken = 1; 2282 return 0; 2283 } 2284 2285 static int __hpp_dimension__add_output(struct perf_hpp_list *list, 2286 struct hpp_dimension *hd) 2287 { 2288 struct perf_hpp_fmt *fmt; 2289 2290 if (hd->taken) 2291 return 0; 2292 2293 fmt = __hpp_dimension__alloc_hpp(hd, 0); 2294 if (!fmt) 2295 return -1; 2296 2297 hd->taken = 1; 2298 perf_hpp_list__column_register(list, fmt); 2299 return 0; 2300 } 2301 2302 int hpp_dimension__add_output(unsigned col) 2303 { 2304 BUG_ON(col >= 
PERF_HPP__MAX_INDEX); 2305 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]); 2306 } 2307 2308 static int sort_dimension__add(struct perf_hpp_list *list, const char *tok, 2309 struct perf_evlist *evlist, 2310 int level) 2311 { 2312 unsigned int i; 2313 2314 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) { 2315 struct sort_dimension *sd = &common_sort_dimensions[i]; 2316 2317 if (strncasecmp(tok, sd->name, strlen(tok))) 2318 continue; 2319 2320 if (sd->entry == &sort_parent) { 2321 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 2322 if (ret) { 2323 char err[BUFSIZ]; 2324 2325 regerror(ret, &parent_regex, err, sizeof(err)); 2326 pr_err("Invalid regex: %s\n%s", parent_pattern, err); 2327 return -EINVAL; 2328 } 2329 list->parent = 1; 2330 } else if (sd->entry == &sort_sym) { 2331 list->sym = 1; 2332 /* 2333 * perf diff displays the performance difference amongst 2334 * two or more perf.data files. Those files could come 2335 * from different binaries. So we should not compare 2336 * their ips, but the name of symbol. 
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		/* case-insensitive prefix match, same as above */
		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* branch stack keys are only valid in branch mode */
		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* memory keys are only valid in memory mode */
		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		/* dcacheline sorting needs a known cacheline size */
		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	/* not a static dimension: try a dynamic tracepoint-field entry */
	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

/*
 * Parse a --sort key list.  Keys are separated by ',' or ' '; keys
 * grouped inside '{...}' stay at the current hierarchy level, while
 * each ungrouped key starts a deeper level.  'str' is consumed in
 * place by the tokenizing.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			/* inside a group, siblings share the same level */
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

/*
 * Pick the default sort order for the current sort__mode.  When every
 * event in 'evlist' is a tracepoint, switch to SORT_MODE__TRACEPOINT
 * (and, in raw-trace mode, sort on "trace_fields").
 */
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	/* the table above must stay in sync with enum sort_mode */
	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL)
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;	/* nothing to append to the default */

	/* a lone "+" carries no keys at all */
	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Add a "pre," prefix to 'str' if 'pre' is not already part of 'str'.
 * Takes ownership of 'str': it is freed once the new string is built.
 *
 * NOTE(review): on asprintf() failure the original 'str' is neither
 * freed nor returned, so the caller loses it (small leak on OOM).
 * NOTE(review): strstr() matches substrings, so e.g. "overhead" counts
 * as already present when only "overhead_children" is in 'str' --
 * confirm this is intended.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

/*
 * Prepend the overhead key(s) to a sort key string, except in diff
 * mode.  May return NULL on allocation failure.
 */
static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	/* resolve a '+'-prefixed sort_order against the defaults first */
	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	/* setup_sort_list() tokenizes in place, so work on a copy */
	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			error("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

/* set/clear the elide flag on the sort entry with the given width index */
void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

/*
 * A column can be elided when its filter list holds exactly one value:
 * that value is printed once as a "# name: value" header (when 'fp' is
 * given) instead of being repeated on every row.
 */
static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

/* decide whether the column with width index 'idx' should be elided */
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	/* the from/to variants only make sense in branch mode */
	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

/* compute the elide flag for every registered sort entry */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

/*
 * Resolve one --fields token against the dimension tables and register
 * it as an output column.  'tok' is a case-insensitive prefix of a
 * dimension name; the first match wins.  Returns -ESRCH when unknown.
 */
static int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	/* --fields keys are separated by ',' or ' '; 'str' is consumed in place */
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

/* forget which dimensions were taken, so sorting can be set up again */
static void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

/* a "strict" order replaces the defaults; a '+'-prefixed one appends to them */
bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

/* parse the --fields string and register the output columns */
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	/* setup_output_list() tokenizes in place, so work on a copy */
	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	/* skip the leading '+' of a non-strict field order */
	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

/*
 * Top-level entry point: set up sort keys and output fields on the
 * global perf_hpp_list from sort_order/field_order and 'evlist'.
 * Returns 0 on success, a negative error code otherwise.
 */
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	/* a non-default parent pattern implies sorting by parent too */
	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

/* revert the global sort/output state so setup_sorting() can run afresh */
void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}