1 #include <math.h> 2 #include <linux/compiler.h> 3 4 #include "../util/hist.h" 5 #include "../util/util.h" 6 #include "../util/sort.h" 7 #include "../util/evsel.h" 8 #include "../util/evlist.h" 9 10 /* hist period print (hpp) functions */ 11 12 #define hpp__call_print_fn(hpp, fn, fmt, ...) \ 13 ({ \ 14 int __ret = fn(hpp, fmt, ##__VA_ARGS__); \ 15 advance_hpp(hpp, __ret); \ 16 __ret; \ 17 }) 18 19 static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he, 20 hpp_field_fn get_field, const char *fmt, int len, 21 hpp_snprint_fn print_fn, bool fmt_percent) 22 { 23 int ret; 24 struct hists *hists = he->hists; 25 struct perf_evsel *evsel = hists_to_evsel(hists); 26 char *buf = hpp->buf; 27 size_t size = hpp->size; 28 29 if (fmt_percent) { 30 double percent = 0.0; 31 u64 total = hists__total_period(hists); 32 33 if (total) 34 percent = 100.0 * get_field(he) / total; 35 36 ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent); 37 } else 38 ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he)); 39 40 if (perf_evsel__is_group_event(evsel)) { 41 int prev_idx, idx_delta; 42 struct hist_entry *pair; 43 int nr_members = evsel->nr_members; 44 45 prev_idx = perf_evsel__group_idx(evsel); 46 47 list_for_each_entry(pair, &he->pairs.head, pairs.node) { 48 u64 period = get_field(pair); 49 u64 total = hists__total_period(pair->hists); 50 51 if (!total) 52 continue; 53 54 evsel = hists_to_evsel(pair->hists); 55 idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1; 56 57 while (idx_delta--) { 58 /* 59 * zero-fill group members in the middle which 60 * have no sample 61 */ 62 if (fmt_percent) { 63 ret += hpp__call_print_fn(hpp, print_fn, 64 fmt, len, 0.0); 65 } else { 66 ret += hpp__call_print_fn(hpp, print_fn, 67 fmt, len, 0ULL); 68 } 69 } 70 71 if (fmt_percent) { 72 ret += hpp__call_print_fn(hpp, print_fn, fmt, len, 73 100.0 * period / total); 74 } else { 75 ret += hpp__call_print_fn(hpp, print_fn, fmt, 76 len, period); 77 } 78 79 prev_idx = perf_evsel__group_idx(evsel); 80 } 81 82 idx_delta = nr_members - prev_idx - 1; 83 84 while (idx_delta--) { 85 /* 86 * zero-fill group members at last which have no sample 87 */ 88 if (fmt_percent) { 89 ret += hpp__call_print_fn(hpp, print_fn, 90 fmt, len, 0.0); 91 } else { 92 ret += hpp__call_print_fn(hpp, print_fn, 93 fmt, len, 0ULL); 94 } 95 } 96 } 97 98 /* 99 * Restore original buf and size as it's where caller expects 100 * the result will be saved. 101 */ 102 hpp->buf = buf; 103 hpp->size = size; 104 105 return ret; 106 } 107 108 int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 109 struct hist_entry *he, hpp_field_fn get_field, 110 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) 111 { 112 int len = fmt->user_len ?: fmt->len; 113 114 if (symbol_conf.field_sep) { 115 return __hpp__fmt(hpp, he, get_field, fmtstr, 1, 116 print_fn, fmt_percent); 117 } 118 119 if (fmt_percent) 120 len -= 2; /* 2 for a space and a % sign */ 121 else 122 len -= 1; 123 124 return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent); 125 } 126 127 int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 128 struct hist_entry *he, hpp_field_fn get_field, 129 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent) 130 { 131 if (!symbol_conf.cumulate_callchain) { 132 int len = fmt->user_len ?: fmt->len; 133 return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A"); 134 } 135 136 return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent); 137 } 138 139 static int field_cmp(u64 field_a, u64 field_b) 140 { 141 if (field_a > field_b) 142 return 1; 143 if (field_a < field_b) 144 return -1; 145 return 0; 146 } 147 148 static int __hpp__sort(struct hist_entry *a, struct hist_entry *b, 149 hpp_field_fn get_field) 150 { 151 s64 ret; 152 int i, nr_members; 153 struct perf_evsel *evsel; 154 struct hist_entry *pair; 155 u64 *fields_a, *fields_b; 156 157 ret = field_cmp(get_field(a), get_field(b)); 158 if (ret || !symbol_conf.event_group) 159 return ret; 160 161 evsel = hists_to_evsel(a->hists); 162 if (!perf_evsel__is_group_event(evsel)) 163 return ret; 164 165 nr_members = evsel->nr_members; 166 fields_a = calloc(nr_members, sizeof(*fields_a)); 167 fields_b = calloc(nr_members, sizeof(*fields_b)); 168 169 if (!fields_a || !fields_b) 170 goto out; 171 172 list_for_each_entry(pair, &a->pairs.head, pairs.node) { 173 evsel = hists_to_evsel(pair->hists); 174 fields_a[perf_evsel__group_idx(evsel)] = get_field(pair); 175 } 176 177 list_for_each_entry(pair, &b->pairs.head, pairs.node) { 178 evsel = hists_to_evsel(pair->hists); 179 fields_b[perf_evsel__group_idx(evsel)] = get_field(pair); 180 } 181 182 for (i = 1; i < nr_members; i++) { 183 ret = field_cmp(fields_a[i], fields_b[i]); 184 if (ret) 185 break; 186 } 187 188 out: 189 free(fields_a); 190 free(fields_b); 191 192 return ret; 193 } 194 195 static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, 196 hpp_field_fn get_field) 197 { 198 s64 ret = 0; 199 200 if (symbol_conf.cumulate_callchain) { 201 /* 202 * Put caller above callee when they have equal period. 203 */ 204 ret = field_cmp(get_field(a), get_field(b)); 205 if (ret) 206 return ret; 207 208 if (a->thread != b->thread || !symbol_conf.use_callchain) 209 return 0; 210 211 ret = b->callchain->max_depth - a->callchain->max_depth; 212 } 213 return ret; 214 } 215 216 static int hpp__width_fn(struct perf_hpp_fmt *fmt, 217 struct perf_hpp *hpp __maybe_unused, 218 struct hists *hists) 219 { 220 int len = fmt->user_len ?: fmt->len; 221 struct perf_evsel *evsel = hists_to_evsel(hists); 222 223 if (symbol_conf.event_group) 224 len = max(len, evsel->nr_members * fmt->len); 225 226 if (len < (int)strlen(fmt->name)) 227 len = strlen(fmt->name); 228 229 return len; 230 } 231 232 static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 233 struct hists *hists, int line __maybe_unused, 234 int *span __maybe_unused) 235 { 236 int len = hpp__width_fn(fmt, hpp, hists); 237 return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name); 238 } 239 240 int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) 241 { 242 va_list args; 243 ssize_t ssize = hpp->size; 244 double percent; 245 int ret, len; 246 247 va_start(args, fmt); 248 len = va_arg(args, int); 249 percent = va_arg(args, double); 250 ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent); 251 va_end(args); 252 253 return (ret >= ssize) ? (ssize - 1) : ret; 254 } 255 256 static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...) 257 { 258 va_list args; 259 ssize_t ssize = hpp->size; 260 int ret; 261 262 va_start(args, fmt); 263 ret = vsnprintf(hpp->buf, hpp->size, fmt, args); 264 va_end(args); 265 266 return (ret >= ssize) ? (ssize - 1) : ret; 267 } 268 269 #define __HPP_COLOR_PERCENT_FN(_type, _field) \ 270 static u64 he_get_##_field(struct hist_entry *he) \ 271 { \ 272 return he->stat._field; \ 273 } \ 274 \ 275 static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ 276 struct perf_hpp *hpp, struct hist_entry *he) \ 277 { \ 278 return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ 279 hpp_color_scnprintf, true); \ 280 } 281 282 #define __HPP_ENTRY_PERCENT_FN(_type, _field) \ 283 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ 284 struct perf_hpp *hpp, struct hist_entry *he) \ 285 { \ 286 return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%", \ 287 hpp_entry_scnprintf, true); \ 288 } 289 290 #define __HPP_SORT_FN(_type, _field) \ 291 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 292 struct hist_entry *a, struct hist_entry *b) \ 293 { \ 294 return __hpp__sort(a, b, he_get_##_field); \ 295 } 296 297 #define __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ 298 static u64 he_get_acc_##_field(struct hist_entry *he) \ 299 { \ 300 return he->stat_acc->_field; \ 301 } \ 302 \ 303 static int hpp__color_##_type(struct perf_hpp_fmt *fmt, \ 304 struct perf_hpp *hpp, struct hist_entry *he) \ 305 { \ 306 return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ 307 hpp_color_scnprintf, true); \ 308 } 309 310 #define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ 311 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ 312 struct perf_hpp *hpp, struct hist_entry *he) \ 313 { \ 314 return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%", \ 315 hpp_entry_scnprintf, true); \ 316 } 317 318 #define __HPP_SORT_ACC_FN(_type, _field) \ 319 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 320 struct hist_entry *a, struct hist_entry *b) \ 321 { \ 322 return __hpp__sort_acc(a, b, he_get_acc_##_field); \ 323 } 324 325 #define __HPP_ENTRY_RAW_FN(_type, _field) \ 326 static u64 he_get_raw_##_field(struct hist_entry *he) \ 327 { \ 328 return he->stat._field; \ 329 } \ 330 \ 331 static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \ 332 struct perf_hpp *hpp, struct hist_entry *he) \ 333 { \ 334 return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64, \ 335 hpp_entry_scnprintf, false); \ 336 } 337 338 #define __HPP_SORT_RAW_FN(_type, _field) \ 339 static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 340 struct hist_entry *a, struct hist_entry *b) \ 341 { \ 342 return __hpp__sort(a, b, he_get_raw_##_field); \ 343 } 344 345 346 #define HPP_PERCENT_FNS(_type, _field) \ 347 __HPP_COLOR_PERCENT_FN(_type, _field) \ 348 __HPP_ENTRY_PERCENT_FN(_type, _field) \ 349 __HPP_SORT_FN(_type, _field) 350 351 #define HPP_PERCENT_ACC_FNS(_type, _field) \ 352 __HPP_COLOR_ACC_PERCENT_FN(_type, _field) \ 353 __HPP_ENTRY_ACC_PERCENT_FN(_type, _field) \ 354 __HPP_SORT_ACC_FN(_type, _field) 355 356 #define HPP_RAW_FNS(_type, _field) \ 357 __HPP_ENTRY_RAW_FN(_type, _field) \ 358 __HPP_SORT_RAW_FN(_type, _field) 359 360 HPP_PERCENT_FNS(overhead, period) 361 HPP_PERCENT_FNS(overhead_sys, period_sys) 362 HPP_PERCENT_FNS(overhead_us, period_us) 363 HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys) 364 HPP_PERCENT_FNS(overhead_guest_us, period_guest_us) 365 HPP_PERCENT_ACC_FNS(overhead_acc, period) 366 367 HPP_RAW_FNS(samples, nr_events) 368 HPP_RAW_FNS(period, period) 369 370 static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused, 371 struct hist_entry *a __maybe_unused, 372 struct hist_entry *b __maybe_unused) 373 { 374 return 0; 375 } 376 377 static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a) 378 { 379 return a->header == hpp__header_fn; 380 } 381 382 static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 383 { 384 if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b)) 385 return false; 386 387 return a->idx == b->idx; 388 } 389 390 #define HPP__COLOR_PRINT_FNS(_name, _fn, _idx) \ 391 { \ 392 .name = _name, \ 393 .header = hpp__header_fn, \ 394 .width = hpp__width_fn, \ 395 .color = hpp__color_ ## _fn, \ 396 .entry = hpp__entry_ ## _fn, \ 397 .cmp = hpp__nop_cmp, \ 398 .collapse = hpp__nop_cmp, \ 399 .sort = hpp__sort_ ## _fn, \ 400 .idx = PERF_HPP__ ## _idx, \ 401 .equal = hpp__equal, \ 402 } 403 404 #define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx) \ 405 { \ 406 .name = _name, \ 407 .header = hpp__header_fn, \ 408 .width = hpp__width_fn, \ 409 .color = hpp__color_ ## _fn, \ 410 .entry = hpp__entry_ ## _fn, \ 411 .cmp = hpp__nop_cmp, \ 412 .collapse = hpp__nop_cmp, \ 413 .sort = hpp__sort_ ## _fn, \ 414 .idx = PERF_HPP__ ## _idx, \ 415 .equal = hpp__equal, \ 416 } 417 418 #define HPP__PRINT_FNS(_name, _fn, _idx) \ 419 { \ 420 .name = _name, \ 421 .header = hpp__header_fn, \ 422 .width = hpp__width_fn, \ 423 .entry = hpp__entry_ ## _fn, \ 424 .cmp = hpp__nop_cmp, \ 425 .collapse = hpp__nop_cmp, \ 426 .sort = hpp__sort_ ## _fn, \ 427 .idx = PERF_HPP__ ## _idx, \ 428 .equal = hpp__equal, \ 429 } 430 431 struct perf_hpp_fmt perf_hpp__format[] = { 432 HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD), 433 HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS), 434 HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US), 435 HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS), 436 HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US), 437 HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC), 438 HPP__PRINT_FNS("Samples", samples, SAMPLES), 439 HPP__PRINT_FNS("Period", period, PERIOD) 440 }; 441 442 struct perf_hpp_list perf_hpp_list = { 443 .fields = LIST_HEAD_INIT(perf_hpp_list.fields), 444 .sorts = LIST_HEAD_INIT(perf_hpp_list.sorts), 445 .nr_header_lines = 1, 446 }; 447 448 #undef HPP__COLOR_PRINT_FNS 449 #undef HPP__COLOR_ACC_PRINT_FNS 450 #undef HPP__PRINT_FNS 451 452 #undef HPP_PERCENT_FNS 453 #undef HPP_PERCENT_ACC_FNS 454 #undef HPP_RAW_FNS 455 456 #undef __HPP_HEADER_FN 457 #undef __HPP_WIDTH_FN 458 #undef __HPP_COLOR_PERCENT_FN 459 #undef __HPP_ENTRY_PERCENT_FN 460 #undef __HPP_COLOR_ACC_PERCENT_FN 461 #undef __HPP_ENTRY_ACC_PERCENT_FN 462 #undef __HPP_ENTRY_RAW_FN 463 #undef __HPP_SORT_FN 464 #undef __HPP_SORT_ACC_FN 465 #undef __HPP_SORT_RAW_FN 466 467 468 void perf_hpp__init(void) 469 { 470 int i; 471 472 for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { 473 struct perf_hpp_fmt *fmt = &perf_hpp__format[i]; 474 475 INIT_LIST_HEAD(&fmt->list); 476 477 /* sort_list may be linked by setup_sorting() */ 478 if (fmt->sort_list.next == NULL) 479 INIT_LIST_HEAD(&fmt->sort_list); 480 } 481 482 /* 483 * If user specified field order, no need to setup default fields. 484 */ 485 if (is_strict_order(field_order)) 486 return; 487 488 if (symbol_conf.cumulate_callchain) { 489 hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC); 490 perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self"; 491 } 492 493 hpp_dimension__add_output(PERF_HPP__OVERHEAD); 494 495 if (symbol_conf.show_cpu_utilization) { 496 hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS); 497 hpp_dimension__add_output(PERF_HPP__OVERHEAD_US); 498 499 if (perf_guest) { 500 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS); 501 hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US); 502 } 503 } 504 505 if (symbol_conf.show_nr_samples) 506 hpp_dimension__add_output(PERF_HPP__SAMPLES); 507 508 if (symbol_conf.show_total_period) 509 hpp_dimension__add_output(PERF_HPP__PERIOD); 510 } 511 512 void perf_hpp_list__column_register(struct perf_hpp_list *list, 513 struct perf_hpp_fmt *format) 514 { 515 list_add_tail(&format->list, &list->fields); 516 } 517 518 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list, 519 struct perf_hpp_fmt *format) 520 { 521 list_add_tail(&format->sort_list, &list->sorts); 522 } 523 524 void perf_hpp__column_unregister(struct perf_hpp_fmt *format) 525 { 526 list_del(&format->list); 527 } 528 529 void perf_hpp__cancel_cumulate(void) 530 { 531 struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp; 532 533 if (is_strict_order(field_order)) 534 return; 535 536 ovh = &perf_hpp__format[PERF_HPP__OVERHEAD]; 537 acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC]; 538 539 perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) { 540 if (acc->equal(acc, fmt)) { 541 perf_hpp__column_unregister(fmt); 542 continue; 543 } 544 545 if (ovh->equal(ovh, fmt)) 546 fmt->name = "Overhead"; 547 } 548 } 549 550 static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b) 551 { 552 return a->equal && a->equal(a, b); 553 } 554 555 void perf_hpp__setup_output_field(struct perf_hpp_list *list) 556 { 557 struct perf_hpp_fmt *fmt; 558 559 /* append sort keys to output field */ 560 perf_hpp_list__for_each_sort_list(list, fmt) { 561 struct perf_hpp_fmt *pos; 562 563 perf_hpp_list__for_each_format(list, pos) { 564 if (fmt_equal(fmt, pos)) 565 goto next; 566 } 567 568 perf_hpp__column_register(fmt); 569 next: 570 continue; 571 } 572 } 573 574 void perf_hpp__append_sort_keys(struct perf_hpp_list *list) 575 { 576 struct perf_hpp_fmt *fmt; 577 578 /* append output fields to sort keys */ 579 perf_hpp_list__for_each_format(list, fmt) { 580 struct perf_hpp_fmt *pos; 581 582 perf_hpp_list__for_each_sort_list(list, pos) { 583 if (fmt_equal(fmt, pos)) 584 goto next; 585 } 586 587 perf_hpp__register_sort_field(fmt); 588 next: 589 continue; 590 } 591 } 592 593 594 static void fmt_free(struct perf_hpp_fmt *fmt) 595 { 596 if (fmt->free) 597 fmt->free(fmt); 598 } 599 600 void perf_hpp__reset_output_field(struct perf_hpp_list *list) 601 { 602 struct perf_hpp_fmt *fmt, *tmp; 603 604 /* reset output fields */ 605 perf_hpp_list__for_each_format_safe(list, fmt, tmp) { 606 list_del_init(&fmt->list); 607 list_del_init(&fmt->sort_list); 608 fmt_free(fmt); 609 } 610 611 /* reset sort keys */ 612 perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) { 613 list_del_init(&fmt->list); 614 list_del_init(&fmt->sort_list); 615 fmt_free(fmt); 616 } 617 } 618 619 /* 620 * See hists__fprintf to match the column widths 621 */ 622 unsigned int hists__sort_list_width(struct hists *hists) 623 { 624 struct perf_hpp_fmt *fmt; 625 int ret = 0; 626 bool first = true; 627 struct perf_hpp dummy_hpp; 628 629 hists__for_each_format(hists, fmt) { 630 if (perf_hpp__should_skip(fmt, hists)) 631 continue; 632 633 if (first) 634 first = false; 635 else 636 ret += 2; 637 638 ret += fmt->width(fmt, &dummy_hpp, hists); 639 } 640 641 if (verbose && hists__has(hists, sym)) /* Addr + origin */ 642 ret += 3 + BITS_PER_LONG / 4; 643 644 return ret; 645 } 646 647 unsigned int hists__overhead_width(struct hists *hists) 648 { 649 struct perf_hpp_fmt *fmt; 650 int ret = 0; 651 bool first = true; 652 struct perf_hpp dummy_hpp; 653 654 hists__for_each_format(hists, fmt) { 655 if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt)) 656 break; 657 658 if (first) 659 first = false; 660 else 661 ret += 2; 662 663 ret += fmt->width(fmt, &dummy_hpp, hists); 664 } 665 666 return ret; 667 } 668 669 void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists) 670 { 671 if (perf_hpp__is_sort_entry(fmt)) 672 return perf_hpp__reset_sort_width(fmt, hists); 673 674 if (perf_hpp__is_dynamic_entry(fmt)) 675 return; 676 677 BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX); 678 679 switch (fmt->idx) { 680 case PERF_HPP__OVERHEAD: 681 case PERF_HPP__OVERHEAD_SYS: 682 case PERF_HPP__OVERHEAD_US: 683 case PERF_HPP__OVERHEAD_ACC: 684 fmt->len = 8; 685 break; 686 687 case PERF_HPP__OVERHEAD_GUEST_SYS: 688 case PERF_HPP__OVERHEAD_GUEST_US: 689 fmt->len = 9; 690 break; 691 692 case PERF_HPP__SAMPLES: 693 case PERF_HPP__PERIOD: 694 fmt->len = 12; 695 break; 696 697 default: 698 break; 699 } 700 } 701 702 void hists__reset_column_width(struct hists *hists) 703 { 704 struct perf_hpp_fmt *fmt; 705 struct perf_hpp_list_node *node; 706 707 hists__for_each_format(hists, fmt) 708 perf_hpp__reset_width(fmt, hists); 709 710 /* hierarchy entries have their own hpp list */ 711 list_for_each_entry(node, &hists->hpp_formats, list) { 712 perf_hpp_list__for_each_format(&node->hpp, fmt) 713 perf_hpp__reset_width(fmt, hists); 714 } 715 } 716 717 void perf_hpp__set_user_width(const char *width_list_str) 718 { 719 struct perf_hpp_fmt *fmt; 720 const char *ptr = width_list_str; 721 722 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) { 723 char *p; 724 725 int len = strtol(ptr, &p, 10); 726 fmt->user_len = len; 727 728 if (*p == ',') 729 ptr = p + 1; 730 else 731 break; 732 } 733 } 734 735 static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt) 736 { 737 struct perf_hpp_list_node *node = NULL; 738 struct perf_hpp_fmt *fmt_copy; 739 bool found = false; 740 bool skip = perf_hpp__should_skip(fmt, hists); 741 742 list_for_each_entry(node, &hists->hpp_formats, list) { 743 if (node->level == fmt->level) { 744 found = true; 745 break; 746 } 747 } 748 749 if (!found) { 750 node = malloc(sizeof(*node)); 751 if (node == NULL) 752 return -1; 753 754 node->skip = skip; 755 node->level = fmt->level; 756 perf_hpp_list__init(&node->hpp); 757 758 hists->nr_hpp_node++; 759 list_add_tail(&node->list, &hists->hpp_formats); 760 } 761 762 fmt_copy = perf_hpp_fmt__dup(fmt); 763 if (fmt_copy == NULL) 764 return -1; 765 766 if (!skip) 767 node->skip = false; 768 769 list_add_tail(&fmt_copy->list, &node->hpp.fields); 770 list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts); 771 772 return 0; 773 } 774 775 int perf_hpp__setup_hists_formats(struct perf_hpp_list *list, 776 struct perf_evlist *evlist) 777 { 778 struct perf_evsel *evsel; 779 struct perf_hpp_fmt *fmt; 780 struct hists *hists; 781 int ret; 782 783 if (!symbol_conf.report_hierarchy) 784 return 0; 785 786 evlist__for_each_entry(evlist, evsel) { 787 hists = evsel__hists(evsel); 788 789 perf_hpp_list__for_each_sort_list(list, fmt) { 790 if (perf_hpp__is_dynamic_entry(fmt) && 791 !perf_hpp__defined_dynamic_entry(fmt, hists)) 792 continue; 793 794 ret = add_hierarchy_fmt(hists, fmt); 795 if (ret < 0) 796 return ret; 797 } 798 } 799 800 return 0; 801 } 802