#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "thread.h"
#include "evsel.h"
#include "evlist.h"
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include <linux/kernel.h>

regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
const char *sort_order;
const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;

/*
 * Replaces all occurrences of the char used with the:
 *
 *   -t, --field-separator
 *
 * option, which uses a special separator character and doesn't pad with
 * spaces, replacing all occurrences of this separator in symbol names (and
 * other output) with a '.' character, so that it is the only non-valid
 * separator.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header = " Pid:Command",
	.se_cmp = sort__thread_cmp,
	.se_snprintf = hist_entry__thread_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the comm strings */
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

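/*
 * se_cmp and se_collapse (above) decide how entries are grouped in the
 * histogram; se_sort (below) is only used when re-sorting for output.
 * All three compare the resolved comm string.
 */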
static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header = "Command",
	.se_cmp = sort__comm_cmp,
	.se_collapse = sort__comm_collapse,
	.se_sort = sort__comm_sort,
	.se_snprintf = hist_entry__comm_snprintf,
	.se_filter = hist_entry__thread_filter,
	.se_width_idx = HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose > 0) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = verbose > 0 ? map->dso->long_name :
			map->dso->short_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->ms.map || he->ms.map->dso != dso);
}

struct sort_entry sort_dso = {
	.se_header = "Shared Object",
	.se_cmp = sort__dso_cmp,
	.se_snprintf = hist_entry__dso_snprintf,
	.se_filter = hist_entry__dso_filter,
	.se_width_idx = HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso) || !hists__has(right->hists, dso)) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose > 0) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header = "Symbol",
	.se_cmp = sort__sym_cmp,
	.se_sort = sort__sym_sort,
	.se_snprintf = hist_entry__sym_snprintf,
	.se_filter = hist_entry__sym_filter,
	.se_width_idx = HISTC_SYMBOL,
};

/* --sort srcline */

char *hist_entry__get_srcline(struct hist_entry *he)
{
	struct map *map = he->ms.map;

	if (!map)
		return SRCLINE_UNKNOWN;

	return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
			   he->ms.sym, true, true);
}

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline)
		left->srcline = hist_entry__get_srcline(left);
	if (!right->srcline)
		right->srcline = hist_entry__get_srcline(right);

	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcline)
		he->srcline = hist_entry__get_srcline(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header = "Source:Line",
	.se_cmp = sort__srcline_cmp,
	.se_snprintf = hist_entry__srcline_snprintf,
	.se_width_idx = HISTC_SRCLINE,
};

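/*
 * The srcline_from/srcline_to keys resolve branch source/target addresses
 * to file:line using the same get_srcline() helper as the srcline key
 * above, caching the result in branch_info.
 */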
/* --sort srcline_from */

static int64_t
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_from) {
		struct map *map = left->branch_info->from.map;
		if (!map)
			left->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->from.al_addr),
					left->branch_info->from.sym,
					true, true);
	}
	if (!right->branch_info->srcline_from) {
		struct map *map = right->branch_info->from.map;
		if (!map)
			right->branch_info->srcline_from = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_from = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->from.al_addr),
					right->branch_info->from.sym,
					true, true);
	}
	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
}

struct sort_entry sort_srcline_from = {
	.se_header = "From Source:Line",
	.se_cmp = sort__srcline_from_cmp,
	.se_snprintf = hist_entry__srcline_from_snprintf,
	.se_width_idx = HISTC_SRCLINE_FROM,
};

/* --sort srcline_to */

static int64_t
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info->srcline_to) {
		struct map *map = left->branch_info->to.map;
		if (!map)
			left->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			left->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  left->branch_info->to.al_addr),
					left->branch_info->to.sym,
					true, true);
	}
	if (!right->branch_info->srcline_to) {
		struct map *map = right->branch_info->to.map;
		if (!map)
			right->branch_info->srcline_to = SRCLINE_UNKNOWN;
		else
			right->branch_info->srcline_to = get_srcline(map->dso,
					map__rip_2objdump(map,
							  right->branch_info->to.al_addr),
					right->branch_info->to.sym,
					true, true);
	}
	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
}

struct sort_entry sort_srcline_to = {
	.se_header = "To Source:Line",
	.se_cmp = sort__srcline_to_cmp,
	.se_snprintf = hist_entry__srcline_to_snprintf,
	.se_width_idx = HISTC_SRCLINE_TO,
};

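/*
 * srcfile is derived from the srcline lookup by chopping the string at the
 * ':' separator; unresolved entries share the static no_srcfile sentinel so
 * they collapse into a single bucket.
 */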
/* --sort srcfile */

static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true);
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header = "Source File",
	.se_cmp = sort__srcfile_cmp,
	.se_snprintf = hist_entry__srcfile_snprintf,
	.se_width_idx = HISTC_SRCFILE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
	.se_header = "Parent symbol",
	.se_cmp = sort__parent_cmp,
	.se_snprintf = hist_entry__parent_snprintf,
	.se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
}

struct sort_entry sort_cpu = {
	.se_header = "CPU",
	.se_cmp = sort__cpu_cmp,
	.se_snprintf = hist_entry__cpu_snprintf,
	.se_width_idx = HISTC_CPU,
};

/* --sort cgroup_id */

static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
{
	return (int64_t)(right_dev - left_dev);
}

static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
{
	return (int64_t)(right_ino - left_ino);
}

static int64_t
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
	if (ret != 0)
		return ret;

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);
}

static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
{
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
			       he->cgroup_id.ino);
}

struct sort_entry sort_cgroup_id = {
	.se_header = "cgroup id (dev/inode)",
	.se_cmp = sort__cgroup_id_cmp,
	.se_snprintf = hist_entry__cgroup_id_snprintf,
	.se_width_idx = HISTC_CGROUP_ID,
};

/* --sort socket */

static int64_t
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->socket - left->socket;
}

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
}

static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
{
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)
		return -1;

	return sk >= 0 && he->socket != sk;
}

struct sort_entry sort_socket = {
	.se_header = "Socket",
	.se_cmp = sort__socket_cmp,
	.se_snprintf = hist_entry__socket_snprintf,
	.se_filter = hist_entry__socket_filter,
	.se_width_idx = HISTC_SOCKET,
};

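/*
 * The trace sort key pretty-prints the tracepoint payload (or dumps the raw
 * fields when --raw-trace is used) and groups/sorts entries on the resulting
 * string; for non-tracepoint events it compares equal and prints N/A.
 */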
/* --sort trace */

static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct perf_evsel *evsel;
	struct pevent_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		pevent_print_fields(&seq, he->raw_data, he->raw_size,
				    evsel->tp_format);
	} else {
		pevent_event_info(&seq, evsel->tp_format, &rec);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(left->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct perf_evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header = "Trace output",
	.se_cmp = sort__trace_cmp,
	.se_snprintf = hist_entry__trace_snprintf,
	.se_width_idx = HISTC_TRACE,
};

/* sort keys for branch stacks */

static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.map,
			      right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->from.map ||
		       he->branch_info->from.map->dso != dso);
}

static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.map,
			      right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.map,
						 bf, size, width);
	else
		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)
		return -1;

	return dso && (!he->branch_info || !he->branch_info->to.map ||
		       he->branch_info->to.map->dso != dso);
}

static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->sym && !from_r->sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->sym, from_r->sym);
}

static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->sym && !to_r->sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
						 he->level, bf, size, width);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
}

static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
				       const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->from.sym &&
			strstr(he->branch_info->from.sym->name, sym));
}

static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
				     const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	return sym && !(he->branch_info && he->branch_info->to.sym &&
			strstr(he->branch_info->to.sym->name, sym));
}

struct sort_entry sort_dso_from = {
	.se_header = "Source Shared Object",
	.se_cmp = sort__dso_from_cmp,
	.se_snprintf = hist_entry__dso_from_snprintf,
	.se_filter = hist_entry__dso_from_filter,
	.se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
	.se_header = "Target Shared Object",
	.se_cmp = sort__dso_to_cmp,
	.se_snprintf = hist_entry__dso_to_snprintf,
	.se_filter = hist_entry__dso_to_filter,
	.se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
	.se_header = "Source Symbol",
	.se_cmp = sort__sym_from_cmp,
	.se_snprintf = hist_entry__sym_from_snprintf,
	.se_filter = hist_entry__sym_from_filter,
	.se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header = "Target Symbol",
	.se_cmp = sort__sym_to_cmp,
	.se_snprintf = hist_entry__sym_to_snprintf,
	.se_filter = hist_entry__sym_to_filter,
	.se_width_idx = HISTC_SYMBOL_TO,
};

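/*
 * Branch flag based sort keys: entries without branch_info fall back to
 * cmp_null() so samples recorded without branch stacks still group sanely.
 */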
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)
			out = "N";
		else if (he->branch_info->flags.mispred)
			out = "Y";
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

static int64_t
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
		right->branch_info->flags.cycles;
}

static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);
}

struct sort_entry sort_cycles = {
	.se_header = "Basic Block Cycles",
	.se_cmp = sort__cycles_cmp,
	.se_snprintf = hist_entry__cycles_snprintf,
	.se_width_idx = HISTC_CYCLES,
};

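/*
 * Memory access sort keys below rely on he->mem_info, which is only
 * populated for memory samples (e.g. perf mem / PERF_SAMPLE_DATA_SRC);
 * entries without it are treated as "not available".
 */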
/* --sort daddr_sym */
int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->iaddr.addr;
	if (right->mem_info)
		r = right->mem_info->iaddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->iaddr.addr;
		map = he->mem_info->iaddr.map;
		sym = he->mem_info->iaddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];

	perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];

	perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

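/*
 * Group samples that hit the same data cache line: compare cpumode first,
 * then the backing map (maj/min/ino/ino_generation), then pid for anonymous
 * userspace mappings, and finally the cache-line aligned address.
 */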
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace. Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped. Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{

	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header = "Branch Mispredicted",
	.se_cmp = sort__mispredict_cmp,
	.se_snprintf = hist_entry__mispredict_snprintf,
	.se_width_idx = HISTC_MISPREDICT,
};

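/* local weight is the average sample weight, global weight the summed total */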
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header = "Local Weight",
	.se_cmp = sort__local_weight_cmp,
	.se_snprintf = hist_entry__local_weight_snprintf,
	.se_width_idx = HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header = "Weight",
	.se_cmp = sort__global_weight_cmp,
	.se_snprintf = hist_entry__global_weight_snprintf,
	.se_width_idx = HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header = "Data Symbol",
	.se_cmp = sort__daddr_cmp,
	.se_snprintf = hist_entry__daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header = "Code Symbol",
	.se_cmp = sort__iaddr_cmp,
	.se_snprintf = hist_entry__iaddr_snprintf,
	.se_width_idx = HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header = "Data Object",
	.se_cmp = sort__dso_daddr_cmp,
	.se_snprintf = hist_entry__dso_daddr_snprintf,
	.se_width_idx = HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header = "Locked",
	.se_cmp = sort__locked_cmp,
	.se_snprintf = hist_entry__locked_snprintf,
	.se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header = "TLB access",
	.se_cmp = sort__tlb_cmp,
	.se_snprintf = hist_entry__tlb_snprintf,
	.se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header = "Memory access",
	.se_cmp = sort__lvl_cmp,
	.se_snprintf = hist_entry__lvl_snprintf,
	.se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header = "Snoop",
	.se_cmp = sort__snoop_cmp,
	.se_snprintf = hist_entry__snoop_snprintf,
	.se_width_idx = HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header = "Data Cacheline",
	.se_cmp = sort__dcacheline_cmp,
	.se_snprintf = hist_entry__dcacheline_snprintf,
	.se_width_idx = HISTC_MEM_DCACHELINE,
};

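/*
 * phys_daddr sorts on the sampled physical data address (recorded with
 * PERF_SAMPLE_PHYS_ADDR); like the other memory keys it needs mem_info.
 */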
static int64_t
sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.phys_addr;
	if (right->mem_info)
		r = right->mem_info->daddr.phys_addr;

	return (int64_t)(r - l);
}

static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	size_t ret = 0;
	size_t len = BITS_PER_LONG / 4;

	addr = he->mem_info->daddr.phys_addr;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);

	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);

	ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");

	if (ret > width)
		bf[width] = '\0';

	return width;
}

struct sort_entry sort_mem_phys_daddr = {
	.se_header = "Data Physical Address",
	.se_cmp = sort__phys_daddr_cmp,
	.se_snprintf = hist_entry__phys_daddr_snprintf,
	.se_width_idx = HISTC_MEM_PHYS_DADDR,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.abort)
			out = "A";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header = "Transaction abort",
	.se_cmp = sort__abort_cmp,
	.se_snprintf = hist_entry__abort_snprintf,
	.se_width_idx = HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.in_tx)
			out = "T";
		else
			out = ".";
	}

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header = "Branch in transaction",
	.se_cmp = sort__in_tx_cmp,
	.se_snprintf = hist_entry__in_tx_snprintf,
	.se_width_idx = HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION, "EL ", 0 },
	{ PERF_TXN_TRANSACTION, "TX ", 1 },
	{ PERF_TXN_SYNC, "SYNC ", 1 },
	{ PERF_TXN_ASYNC, "ASYNC ", 0 },
	{ PERF_TXN_RETRY, "RETRY ", 0 },
	{ PERF_TXN_CONFLICT, "CON ", 0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
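	/* decode each set transaction flag into its name, then the abort code */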
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}

struct sort_entry sort_transaction = {
	.se_header = "Transaction ",
	.se_cmp = sort__transaction_cmp,
	.se_snprintf = hist_entry__transaction_snprintf,
	.se_width_idx = HISTC_TRANSACTION,
};

/* --sort symbol_size */

static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
	int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;

	return size_l < size_r ? -1 :
		size_l == size_r ? 0 : 1;
}

static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}

static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}

struct sort_entry sort_sym_size = {
	.se_header = "Symbol size",
	.se_cmp = sort__sym_size_cmp,
	.se_snprintf = hist_entry__sym_size_snprintf,
	.se_width_idx = HISTC_SYM_SIZE,
};


struct sort_dimension {
	const char *name;
	struct sort_entry *entry;
	int taken;
};

#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
};

#undef DIM

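/*
 * Branch stack and memory dimensions are indexed relative to their own enum
 * base (__SORT_BRANCH_STACK / __SORT_MEMORY_MODE) so each table stays dense.
 */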
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};

#undef DIM

#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM

struct hpp_dimension {
	const char *name;
	struct perf_hpp_fmt *fmt;
	int taken;
};

#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM

struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

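/* fall back to se_cmp when a sort entry has no dedicated collapse/sort op */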
static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)


static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

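/*
 * Returns -1 when no sort key in the hpp list implements this filter type,
 * 0 when the entry passes every applicable filter, and non-zero when at
 * least one of them filters the entry out.
 */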
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * The hist entry is filtered if any sort key in the hpp list
		 * applies to it, but non-matching filter types are skipped.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct perf_evsel *evsel;
	struct format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}
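/* a dynamic entry is bound to one evsel; only show it for that evsel's hists */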
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		pevent_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		pevent_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	/* memdup() can fail; don't dereference a NULL copy below */
	if (new_fmt == NULL)
		return NULL;

	INIT_LIST_HEAD(&new_fmt->list);
	INIT_LIST_HEAD(&new_fmt->sort_list);

	return new_fmt;
}

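/*
 * Split a --sort token of the form [<event>.]<field>[/<option>] in place.
 * <event> may be omitted, in which case the field is matched against all
 * tracepoint events; "raw" is the only option currently recognized.
 */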
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt = opt_name;

	return 0;
}

/* find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_name)
{
	struct perf_evsel *evsel = NULL;
	struct perf_evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name + 1, NULL, 0);

		if (nr > evlist->nr_entries)
			return NULL;

		evsel = perf_evlist__first(evlist);
		while (--nr > 0)
			evsel = perf_evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && !strcmp(pos->name, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

static int __dynamic_dimension__add(struct perf_evsel *evsel,
				    struct format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct perf_evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct perf_evsel *evsel;
	struct format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}

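/*
 * Add a dynamic sort key for a tracepoint field.  "trace_fields" expands to
 * every field of every tracepoint event, "<event>.*" to every field of one
 * event; otherwise a single matching field is added.
 */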
static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct perf_evsel *evsel;
	struct format_field *field;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		field = pevent_find_any_field(evsel->tp_format, field_name);
		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			/* take the common exit path so 'str' isn't leaked */
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

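/*
 * Resolve a single --sort token: common keys first, then hpp keys
 * (overhead etc.), branch-stack and memory keys (which require the matching
 * sort mode), and finally dynamic tracepoint fields.
 */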
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct perf_evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files.  Those files could come
			 * from different binaries, so we should not compare
			 * their ips, but the name of the symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct perf_evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
					pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					pr_err("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				pr_err("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

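/*
 * Pick the default sort order for the current sort mode; if every event in
 * the evlist is a tracepoint, switch to SORT_MODE__TRACEPOINT first.
 */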
static const char *get_default_sort_order(struct perf_evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct perf_evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct perf_evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		pr_err("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's referenced throughout the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		return NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

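/*
 * Build the sort key list from --sort (or the mode's default order) and
 * register each key with perf_hpp_list.
 */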
static int __setup_sorting(struct perf_evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add the default
			 * sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

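/*
 * Elide columns whose filter list contains exactly one entry (the value is
 * implied by the filter), but never end up eliding every column at once.
 */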
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			pr_err("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			pr_err("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		pr_err("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

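/*
 * Main entry point: set up the sort keys and output fields, then sync the
 * two lists so sort keys show up as columns and vice versa.
 */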
int setup_sorting(struct perf_evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}