#include <sys/mman.h>
#include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
#include "evsel.h"

regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
const char	default_sort_order[] = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	*sort_order;
const char	*field_order;
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
int		sort__need_collapse = 0;
int		sort__has_parent = 0;
int		sort__has_sym = 0;
int		sort__has_dso = 0;
enum sort_mode	sort__mode = SORT_MODE__NORMAL;

/*
 * snprintf() wrapper that, when a field separator is in use, replaces any
 * occurrence of that separator in the formatted output with '.' so the
 * separated columns stay parseable.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		char *sep = bf;

		while (1) {
			sep = strchr(sep, *symbol_conf.field_sep);
			if (sep == NULL)
				break;
			*sep = '.';
		}
	}
	va_end(ap);

	if (n >= (int)size)
		return size - 1;
	return n;
}

static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	width = max(7U, width) - 6;
	return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_width_idx	= HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return comm__str(right->comm) - comm__str(left->comm);
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare the addr that should be unique among comm */
	return comm__str(right->comm) - comm__str(left->comm);
}

static int64_t
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
{
	return strcmp(comm__str(right->comm), comm__str(left->comm));
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	.se_width_idx	= HISTC_COMM,
};

/* --sort dso */

static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
	struct dso *dso_l = map_l ? map_l->dso : NULL;
	struct dso *dso_r = map_r ? map_r->dso : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	if (verbose) {
		dso_name_l = dso_l->long_name;
		dso_name_r = dso_r->long_name;
	} else {
		dso_name_l = dso_l->short_name;
		dso_name_r = dso_r->short_name;
	}

	return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_cmp(right->ms.map, left->ms.map);
}

static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
{
	if (map && map->dso) {
		const char *dso_name = !verbose ? map->dso->short_name :
			map->dso->long_name;
		return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
	}

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_width_idx	= HISTC_DSO,
};

/* --sort symbol */

static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
	return (int64_t)(right_ip - left_ip);
}

static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
	u64 ip_l, ip_r;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	if (sym_l == sym_r)
		return 0;

	ip_l = sym_l->start;
	ip_r = sym_r->start;

	return (int64_t)(ip_r - ip_l);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	int64_t ret;

	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!sort__has_dso) {
		ret = sort__dso_cmp(left, right);
		if (ret != 0)
			return ret;
	}

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	size_t ret = 0;

	if (verbose) {
		char o = map ? dso__symtab_origin(map->dso) : '!';
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (map->type == MAP__VARIABLE) {
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret, "");
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
					       width - ret,
					       sym->name);
		}
	} else {
		size_t len = BITS_PER_LONG / 4;
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
		ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
				       width - ret, "");
	}

	if (ret > width)
		bf[width] = '\0';

	return width;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
					 he->level, bf, size, width);
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_width_idx	= HISTC_SYMBOL,
};

/* --sort srcline */

static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->srcline) {
		if (!left->ms.map)
			left->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = left->ms.map;
			left->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, left->ip));
		}
	}
	if (!right->srcline) {
		if (!right->ms.map)
			right->srcline = SRCLINE_UNKNOWN;
		else {
			struct map *map = right->ms.map;
			right->srcline = get_srcline(map->dso,
					map__rip_2objdump(map, right->ip));
		}
	}
	return strcmp(right->srcline, left->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcline);
}

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");
he->parent->name : "[other]"); 341 } 342 343 struct sort_entry sort_parent = { 344 .se_header = "Parent symbol", 345 .se_cmp = sort__parent_cmp, 346 .se_snprintf = hist_entry__parent_snprintf, 347 .se_width_idx = HISTC_PARENT, 348 }; 349 350 /* --sort cpu */ 351 352 static int64_t 353 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right) 354 { 355 return right->cpu - left->cpu; 356 } 357 358 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf, 359 size_t size, unsigned int width) 360 { 361 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu); 362 } 363 364 struct sort_entry sort_cpu = { 365 .se_header = "CPU", 366 .se_cmp = sort__cpu_cmp, 367 .se_snprintf = hist_entry__cpu_snprintf, 368 .se_width_idx = HISTC_CPU, 369 }; 370 371 /* sort keys for branch stacks */ 372 373 static int64_t 374 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 375 { 376 return _sort__dso_cmp(left->branch_info->from.map, 377 right->branch_info->from.map); 378 } 379 380 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf, 381 size_t size, unsigned int width) 382 { 383 return _hist_entry__dso_snprintf(he->branch_info->from.map, 384 bf, size, width); 385 } 386 387 static int64_t 388 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 389 { 390 return _sort__dso_cmp(left->branch_info->to.map, 391 right->branch_info->to.map); 392 } 393 394 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf, 395 size_t size, unsigned int width) 396 { 397 return _hist_entry__dso_snprintf(he->branch_info->to.map, 398 bf, size, width); 399 } 400 401 static int64_t 402 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 403 { 404 struct addr_map_symbol *from_l = &left->branch_info->from; 405 struct addr_map_symbol *from_r = &right->branch_info->from; 406 407 if (!from_l->sym && !from_r->sym) 408 return _sort__addr_cmp(from_l->addr, from_r->addr); 409 410 return _sort__sym_cmp(from_l->sym, from_r->sym); 411 } 412 413 static int64_t 414 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 415 { 416 struct addr_map_symbol *to_l = &left->branch_info->to; 417 struct addr_map_symbol *to_r = &right->branch_info->to; 418 419 if (!to_l->sym && !to_r->sym) 420 return _sort__addr_cmp(to_l->addr, to_r->addr); 421 422 return _sort__sym_cmp(to_l->sym, to_r->sym); 423 } 424 425 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf, 426 size_t size, unsigned int width) 427 { 428 struct addr_map_symbol *from = &he->branch_info->from; 429 return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 430 he->level, bf, size, width); 431 432 } 433 434 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf, 435 size_t size, unsigned int width) 436 { 437 struct addr_map_symbol *to = &he->branch_info->to; 438 return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 439 he->level, bf, size, width); 440 441 } 442 443 struct sort_entry sort_dso_from = { 444 .se_header = "Source Shared Object", 445 .se_cmp = sort__dso_from_cmp, 446 .se_snprintf = hist_entry__dso_from_snprintf, 447 .se_width_idx = HISTC_DSO_FROM, 448 }; 449 450 struct sort_entry sort_dso_to = { 451 .se_header = "Target Shared Object", 452 .se_cmp = sort__dso_to_cmp, 453 .se_snprintf = hist_entry__dso_to_snprintf, 454 .se_width_idx = HISTC_DSO_TO, 455 }; 456 457 struct sort_entry sort_sym_from = { 458 .se_header = "Source Symbol", 459 .se_cmp = sort__sym_from_cmp, 460 .se_snprintf = hist_entry__sym_from_snprintf, 
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_width_idx	= HISTC_SYMBOL_TO,
};

static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
	const unsigned char mp = left->branch_info->flags.mispred !=
				 right->branch_info->flags.mispred;
	const unsigned char p = left->branch_info->flags.predicted !=
				right->branch_info->flags.predicted;

	return mp || p;
}

static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	static const char *out = "N/A";

	if (he->branch_info->flags.predicted)
		out = "N";
	else if (he->branch_info->flags.mispred)
		out = "Y";

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
}

/* --sort daddr_sym */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	uint64_t l = 0, r = 0;

	if (left->mem_info)
		l = left->mem_info->daddr.addr;
	if (right->mem_info)
		r = right->mem_info->daddr.addr;

	return (int64_t)(r - l);
}

static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;

	if (he->mem_info) {
		addr = he->mem_info->daddr.addr;
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;
	}
	return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
					 width);
}

static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct map *map_l = NULL;
	struct map *map_r = NULL;

	if (left->mem_info)
		map_l = left->mem_info->daddr.map;
	if (right->mem_info)
		map_r = right->mem_info->daddr.map;

	return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
{
	struct map *map = NULL;

	if (he->mem_info)
		map = he->mem_info->daddr.map;

	return _hist_entry__dso_snprintf(map, bf, size, width);
}

static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *out;
	u64 mask = PERF_MEM_LOCK_NA;

	if (he->mem_info)
		mask = he->mem_info->data_src.mem_lock;

	if (mask & PERF_MEM_LOCK_NA)
		out = "N/A";
	else if (mask & PERF_MEM_LOCK_LOCKED)
		out = "Yes";
	else
		out = "No";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

static const char * const tlb_access[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"L2",
	"Walker",
	"Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t l = 0, i;
	u64 m = PERF_MEM_TLB_NA;
	u64 hit, miss;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_dtlb;

	hit = m & PERF_MEM_TLB_HIT;
	miss = m & PERF_MEM_TLB_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

	for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, tlb_access[i], sz - l);
		l += strlen(tlb_access[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

static const char * const mem_lvl[] = {
	"N/A",
	"HIT",
	"MISS",
	"L1",
	"LFB",
	"L2",
	"L3",
	"Local RAM",
	"Remote RAM (1 hop)",
	"Remote RAM (2 hops)",
	"Remote Cache (1 hop)",
	"Remote Cache (2 hops)",
	"I/O",
	"Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_LVL_NA;
	u64 hit, miss;

	if (he->mem_info)
		m = he->mem_info->data_src.mem_lvl;

	out[0] = '\0';

	hit = m & PERF_MEM_LVL_HIT;
	miss = m & PERF_MEM_LVL_MISS;

	/* already taken care of */
	m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

	for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, mem_lvl[i], sz - l);
		l += strlen(mem_lvl[i]);
	}
	if (*out == '\0')
		strcpy(out, "N/A");
	if (hit)
		strncat(out, " hit", sz - l);
	if (miss)
		strncat(out, " miss", sz - l);

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

static const char * const snoop_access[] = {
	"N/A",
	"None",
	"Miss",
	"Hit",
	"HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	char out[64];
	size_t sz = sizeof(out) - 1; /* -1 for null termination */
	size_t i, l = 0;
	u64 m = PERF_MEM_SNOOP_NA;

	out[0] = '\0';

	if (he->mem_info)
		m = he->mem_info->data_src.mem_snoop;

	for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
		if (!(m & 0x1))
			continue;
		if (l) {
			strcat(out, " or ");
			l += 4;
		}
		strncat(out, snoop_access[i], sz - l);
		l += strlen(snoop_access[i]);
	}

	if (*out == '\0')
		strcpy(out, "N/A");

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

static inline u64 cl_address(u64 address)
{
	/* return the cacheline of the address */
	return (address & ~(cacheline_size - 1));
}

static int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;

	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.map;
	r_map = right->mem_info->daddr.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	if (l_map->maj > r_map->maj) return -1;
	if (l_map->maj < r_map->maj) return 1;

	if (l_map->min > r_map->min) return -1;
	if (l_map->min < r_map->min) return 1;

	if (l_map->ino > r_map->ino) return -1;
	if (l_map->ino < r_map->ino) return 1;

	if (l_map->ino_generation > r_map->ino_generation) return -1;
	if (l_map->ino_generation < r_map->ino_generation) return 1;

	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */

	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->maj && !l_map->min && !l_map->ino &&
	    !l_map->ino_generation) {
		/* userspace anonymous */

		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}

static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map *map = NULL;
	struct symbol *sym = NULL;
	char level = he->level;

	if (he->mem_info) {
		addr = cl_address(he->mem_info->daddr.al_addr);
		map = he->mem_info->daddr.map;
		sym = he->mem_info->daddr.sym;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && (map->type == MAP__VARIABLE) &&
		    (map->flags & MAP_SHARED) &&
		    (map->maj || map->min || map->ino ||
		     map->ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
					 width);
}

struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};

static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};

static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};

struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};

static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}

static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = ".";

	if (he->branch_info->flags.abort)
		out = "A";
	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};

static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}

static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	static const char *out = ".";

	if (he->branch_info->flags.in_tx)
		out = "T";

	return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};

static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}

static inline char *add_str(char *p, const char *str)
{
	strcpy(p, str);
	return p + strlen(str);
}

static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,		"EL ",		0 },
	{ PERF_TXN_TRANSACTION,		"TX ",		1 },
	{ PERF_TXN_SYNC,		"SYNC ",	1 },
	{ PERF_TXN_ASYNC,		"ASYNC ",	0 },
	{ PERF_TXN_RETRY,		"RETRY ",	0 },
	{ PERF_TXN_CONFLICT,		"CON ",		0 },
	{ PERF_TXN_CAPACITY_WRITE,	"CAP-WRITE ",	1 },
	{ PERF_TXN_CAPACITY_READ,	"CAP-READ ",	0 },
	{ 0, NULL, 0 }
};

int hist_entry__transaction_len(void)
{
	int i;
	int len = 0;

	for (i = 0; txbits[i].name; i++) {
		if (!txbits[i].skip_for_len)
			len += strlen(txbits[i].name);
	}
	len += 4; /* :XX<space> */
	return len;
}

static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
!(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC))) 1092 p = add_str(p, "NEITHER "); 1093 if (t & PERF_TXN_ABORT_MASK) { 1094 sprintf(p, ":%" PRIx64, 1095 (t & PERF_TXN_ABORT_MASK) >> 1096 PERF_TXN_ABORT_SHIFT); 1097 p += strlen(p); 1098 } 1099 1100 return repsep_snprintf(bf, size, "%-*s", width, buf); 1101 } 1102 1103 struct sort_entry sort_transaction = { 1104 .se_header = "Transaction ", 1105 .se_cmp = sort__transaction_cmp, 1106 .se_snprintf = hist_entry__transaction_snprintf, 1107 .se_width_idx = HISTC_TRANSACTION, 1108 }; 1109 1110 struct sort_dimension { 1111 const char *name; 1112 struct sort_entry *entry; 1113 int taken; 1114 }; 1115 1116 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 1117 1118 static struct sort_dimension common_sort_dimensions[] = { 1119 DIM(SORT_PID, "pid", sort_thread), 1120 DIM(SORT_COMM, "comm", sort_comm), 1121 DIM(SORT_DSO, "dso", sort_dso), 1122 DIM(SORT_SYM, "symbol", sort_sym), 1123 DIM(SORT_PARENT, "parent", sort_parent), 1124 DIM(SORT_CPU, "cpu", sort_cpu), 1125 DIM(SORT_SRCLINE, "srcline", sort_srcline), 1126 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight), 1127 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight), 1128 DIM(SORT_TRANSACTION, "transaction", sort_transaction), 1129 }; 1130 1131 #undef DIM 1132 1133 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) } 1134 1135 static struct sort_dimension bstack_sort_dimensions[] = { 1136 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 1137 DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 1138 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from), 1139 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to), 1140 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 1141 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1142 DIM(SORT_ABORT, "abort", sort_abort), 1143 }; 1144 1145 #undef DIM 1146 1147 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) } 1148 1149 static struct sort_dimension memory_sort_dimensions[] = { 1150 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym), 1151 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso), 1152 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked), 1153 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb), 1154 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl), 1155 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop), 1156 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline), 1157 }; 1158 1159 #undef DIM 1160 1161 struct hpp_dimension { 1162 const char *name; 1163 struct perf_hpp_fmt *fmt; 1164 int taken; 1165 }; 1166 1167 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], } 1168 1169 static struct hpp_dimension hpp_sort_dimensions[] = { 1170 DIM(PERF_HPP__OVERHEAD, "overhead"), 1171 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"), 1172 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"), 1173 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"), 1174 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"), 1175 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"), 1176 DIM(PERF_HPP__SAMPLES, "sample"), 1177 DIM(PERF_HPP__PERIOD, "period"), 1178 }; 1179 1180 #undef DIM 1181 1182 struct hpp_sort_entry { 1183 struct perf_hpp_fmt hpp; 1184 struct sort_entry *se; 1185 }; 1186 1187 bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 1188 { 1189 struct hpp_sort_entry *hse_a; 1190 struct hpp_sort_entry *hse_b; 1191 1192 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b)) 1193 return false; 1194 1195 hse_a = container_of(a, struct hpp_sort_entry, hpp); 1196 hse_b = container_of(b, struct hpp_sort_entry, hpp); 1197 
	return hse_a->se == hse_b->se;
}

void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}

static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct perf_evsel *evsel)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = sd->entry->se_cmp;
	hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
	hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;

	return hse;
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);

	if (hse == NULL)
		return -1;

	perf_hpp__register_sort_field(&hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);

	if (hse == NULL)
		return -1;

	perf_hpp__column_register(&hse->hpp);
	return 0;
}

static int __sort_dimension__add(struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd) < 0)
		return -1;

	if (sd->entry->se_collapse)
		sort__need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd)
{
	if (!hd->taken) {
		hd->taken = 1;

		perf_hpp__register_sort_field(hd->fmt);
	}
	return 0;
}

static int __sort_dimension__add_output(struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct hpp_dimension *hd)
{
	if (!hd->taken) {
		hd->taken = 1;

		perf_hpp__column_register(hd->fmt);
	}
	return 0;
}

int sort_dimension__add(const char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			sort__has_parent = 1;
		} else if (sd->entry == &sort_sym) {
			sort__has_sym = 1;
		} else if (sd->entry == &sort_dso) {
			sort__has_dso = 1;
		}

		return __sort_dimension__add(sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			sort__has_sym = 1;

		__sort_dimension__add(sd);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			sort__has_sym = 1;

		__sort_dimension__add(sd);
		return 0;
	}

	return -ESRCH;
}

static const char *get_default_sort_order(void)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
	};

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	return default_sort_orders[sort__mode];
}

static int setup_sort_order(void)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(), sort_order + 1) < 0) {
		error("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

static int __setup_sorting(void)
{
	char *tmp, *tok, *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order();
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order();
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		error("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = sort_dimension__add(tok);
		if (ret == -EINVAL) {
			error("Invalid --sort key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			error("Unknown --sort key: `%s'", tok);
			break;
		}
	}

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp__for_each_format(fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp__for_each_format(fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp__for_each_format(fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp__for_each_format(fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

static int output_field_add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(sd);
	}

	return -ESRCH;
}

static void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *tmp, *tok, *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	reset_dimensions();

	strp = str = strdup(field_order);
	if (str == NULL) {
		error("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		error("Invalid --fields key: `+'");
		goto out;
	}

	for (tok = strtok_r(strp, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(tok);
		if (ret == -EINVAL) {
			error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
1713 error("Unknown --fields key: `%s'", tok); 1714 break; 1715 } 1716 } 1717 1718 out: 1719 free(str); 1720 return ret; 1721 } 1722 1723 int setup_sorting(void) 1724 { 1725 int err; 1726 1727 err = __setup_sorting(); 1728 if (err < 0) 1729 return err; 1730 1731 if (parent_pattern != default_parent_pattern) { 1732 err = sort_dimension__add("parent"); 1733 if (err < 0) 1734 return err; 1735 } 1736 1737 reset_dimensions(); 1738 1739 /* 1740 * perf diff doesn't use default hpp output fields. 1741 */ 1742 if (sort__mode != SORT_MODE__DIFF) 1743 perf_hpp__init(); 1744 1745 err = __setup_output_field(); 1746 if (err < 0) 1747 return err; 1748 1749 /* copy sort keys to output fields */ 1750 perf_hpp__setup_output_field(); 1751 /* and then copy output fields to sort keys */ 1752 perf_hpp__append_sort_keys(); 1753 1754 return 0; 1755 } 1756 1757 void reset_output_field(void) 1758 { 1759 sort__need_collapse = 0; 1760 sort__has_parent = 0; 1761 sort__has_sym = 0; 1762 sort__has_dso = 0; 1763 1764 field_order = NULL; 1765 sort_order = NULL; 1766 1767 reset_dimensions(); 1768 perf_hpp__reset_output_field(); 1769 } 1770