#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;

	return ret;
}
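
/*
 * Entry point used by the column callbacks below: resolves the
 * effective column width and delegates to __hpp__fmt().  With a
 * field separator active, the width is fixed at 1 since alignment
 * is irrelevant for machine-readable output.
 */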
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;
		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int64_t __hpp__sort(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);

	return ret;
}

static int64_t __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			       hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
	}
	return ret;
}
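
/*
 * Default header/width callbacks shared by all hpp columns: a column
 * must be at least as wide as its header text, and wide enough to
 * hold one value per member when event groups are displayed.
 */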
static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct perf_evsel *evsel)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct perf_evsel *evsel)
{
	int len = hpp__width_fn(fmt, hpp, evsel);
	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}
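
/*
 * Raw variants print the field value itself (e.g. the sample count
 * or the raw period) rather than a percentage of the total period.
 */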
#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,				\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}


#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};
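
/* global list of enabled output fields and sort keys */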
struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN


void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del(&format->list);
}

void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}
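
/*
 * The mirror image of perf_hpp__setup_output_field(): every output
 * field that is not already a sort key gets appended to the sort
 * list, so that all visible columns take part in sorting.
 */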
void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}


static void fmt_free(struct perf_hpp_fmt *fmt)
{
	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
	}

	if (verbose && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}
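
/*
 * Attach a private copy of @fmt to the hpp_list node for its
 * hierarchy level, allocating the per-level node on first use.
 */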
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}