// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <linux/compiler.h>

#include "../util/callchain.h"
#include "../util/debug.h"
#include "../util/hist.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"
#include "../perf.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->core.nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill trailing group members which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore the original buf and size since that is where the
	 * caller expects the result to be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
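/*
 * hpp__fmt() adjusts the column width before delegating to __hpp__fmt():
 * percent output reserves two characters for the leading space and the
 * '%' sign (a length of 7 with the " %*.2f%%" format string effectively
 * prints " %5.2f%%"), raw output reserves one for the leading space.
 * When a field separator is in use, alignment is irrelevant and the
 * width is pinned to 1.
 */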
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
		       hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->core.nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}
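/*
 * Accumulated (--children) variant: periods are compared first; on a tie
 * within the same thread, callchain depth breaks the tie so that callers
 * are put above callees (inverted for ORDER_CALLER).
 */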
static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) ||
		    !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->core.nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}
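/*
 * The __HPP_*_FN() macros below stamp out the per-column callbacks
 * (field getter, color/entry printers, sort comparator) for a given
 * hist_entry stat field.  For example, HPP_PERCENT_FNS(overhead, period)
 * expands to he_get_period(), hpp__color_overhead(), hpp__entry_overhead()
 * and hpp__sort_overhead(), all operating on he->stat.period.
 */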
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)					\
__HPP_COLOR_PERCENT_FN(_type, _field)					\
__HPP_ENTRY_PERCENT_FN(_type, _field)					\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)				\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)					\
__HPP_ENTRY_RAW_FN(_type, _field)					\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)		\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}
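/*
 * Built-in output columns, indexed by PERF_HPP__* (the .idx member).
 * perf_hpp__init() below registers only the columns the current session
 * needs via hpp_dimension__add_output(): overhead always; children, CPU
 * utilization, samples and period on demand.
 */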
struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If the user specified a field order, there is no need to set up
	 * the default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}
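/*
 * Output fields and sort keys live on two separate lists.  The two
 * helpers below synchronize them in either direction, skipping formats
 * that are already present on the target list.
 */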
void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked; if not, it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}
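/*
 * Apply a comma-separated list of user-specified widths (e.g. "8,12,40"
 * sets the first three columns) to the configured formats in order;
 * parsing stops at the first number that is not followed by a comma.
 */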
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}