// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

/*
 * Will be cast to struct ip_callchain which has all 64 bit entries
 * of nr and ips[].
 */
static u64 fake_callchains[][10] = {
	/*   schedule => run_command => main */
	{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/*   cmd_record => run_command => main */
	{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   malloc => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/*   free => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/*   main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/*   page_fault => sys_perf_event_open => run_command => main */
	{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
	     FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   main */
	{ 1, FAKE_IP_BASH_MAIN, },
	/*   xmalloc => malloc => xmalloc => malloc => xmalloc => main */
	{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
	     FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
	/*   page_fault => malloc => main */
	{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};

static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 1000, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.hide_unresolved = false,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;
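		/*
		 * Each row of fake_callchains[] starts with the number of
		 * entries followed by the IPs, mirroring the nr/ips[] layout
		 * of struct ip_callchain, so the cast below is valid.
		 */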
		sample.callchain = (struct ip_callchain *)fake_callchains[i];

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		fake_samples[i].thread = al.thread;
		map__put(fake_samples[i].map);
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}

static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;
	struct rb_node *node;

	if (hists__has(hists, need_collapse))
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
		node = rb_first_cached(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase_cached(node, root_out);
		rb_erase_cached(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

static void put_fake_samples(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++)
		map__put(fake_samples[i].map);
}

typedef int (*test_fn_t)(struct evsel *, struct machine *);

#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (map__dso(he->ms.map)->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (map__dso(cl->ms.map)->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};

static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/*
	 * adding and deleting hist entries must be done outside of this
	 * function since TEST_ASSERT_VAL() returns in case of failure.
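	 * Callers (test1..test4) therefore do the del_hist_entries() and
	 * reset_output_field() cleanup themselves.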
	 */
	hists__collapse_resort(hists, NULL);
	evsel__output_resort(hists_to_evsel(hists), NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries.rb_root;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/* check callchain entries */
		root = &he->callchain->node.rb_root;

		TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple child nodes properly */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}

/* NO callchain + NO children */
static int test1(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf            [.] main
	 *   10.00%     bash  [kernel]        [k] page_fault
	 *   10.00%     bash  bash            [.] main
	 *   10.00%     bash  bash            [.] xmalloc
	 *   10.00%     perf  [kernel]        [k] page_fault
	 *   10.00%     perf  [kernel]        [k] schedule
	 *   10.00%     perf  libc            [.] free
	 *   10.00%     perf  libc            [.] malloc
	 *   10.00%     perf  perf            [.] cmd_record
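	 *
	 * (Each fake sample is added with a period of 1000 and there are 10
	 *  samples in total; perf's main() is hit by two of them, hence
	 *  2000/10000 = 20.00%.)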
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + NO children */
static int test2(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf            [.] main
	 *            |
	 *            --- main
	 *
	 *   10.00%     bash  [kernel]        [k] page_fault
	 *            |
	 *            --- page_fault
	 *                malloc
	 *                main
	 *
	 *   10.00%     bash  bash            [.] main
	 *            |
	 *            --- main
	 *
	 *   10.00%     bash  bash            [.] xmalloc
	 *            |
	 *            --- xmalloc
	 *                malloc
	 *                xmalloc     <--- NOTE: there's a cycle
	 *                malloc
	 *                xmalloc
	 *                main
	 *
	 *   10.00%     perf  [kernel]        [k] page_fault
	 *            |
	 *            --- page_fault
	 *                sys_perf_event_open
	 *                run_command
	 *                main
	 *
	 *   10.00%     perf  [kernel]        [k] schedule
	 *            |
	 *            --- schedule
	 *                run_command
	 *                main
	 *
	 *   10.00%     perf  libc            [.] free
	 *            |
	 *            --- free
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 *   10.00%     perf  libc            [.] malloc
	 *            |
	 *            --- malloc
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 *   10.00%     perf  perf            [.] cmd_record
	 *            |
	 *            --- cmd_record
	 *                run_command
	 *                main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* NO callchain + children */
static int test3(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf            [.] main
	 *   50.00%     0.00%     perf  perf            [.] run_command
	 *   30.00%    10.00%     bash  bash            [.] main
	 *   30.00%    10.00%     perf  perf            [.] cmd_record
	 *   20.00%     0.00%     bash  libc            [.] malloc
	 *   10.00%    10.00%     bash  [kernel]        [k] page_fault
	 *   10.00%    10.00%     bash  bash            [.] xmalloc
	 *   10.00%    10.00%     perf  [kernel]        [k] page_fault
	 *   10.00%    10.00%     perf  libc            [.] malloc
	 *   10.00%    10.00%     perf  [kernel]        [k] schedule
	 *   10.00%    10.00%     perf  libc            [.] free
	 *   10.00%     0.00%     perf  [kernel]        [k] sys_perf_event_open
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + children */
static int test4(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf            [.] main
	 *            |
	 *            --- main
	 *
	 *   50.00%     0.00%     perf  perf            [.] run_command
	 *            |
	 *            --- run_command
	 *                main
	 *
	 *   30.00%    10.00%     bash  bash            [.] main
	 *            |
	 *            --- main
	 *
	 *   30.00%    10.00%     perf  perf            [.] cmd_record
	 *            |
	 *            --- cmd_record
	 *                run_command
	 *                main
	 *
	 *   20.00%     0.00%     bash  libc            [.] malloc
	 *            |
	 *            --- malloc
	 *               |
	 *               |--50.00%-- xmalloc
	 *               |           main
	 *                --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]        [k] page_fault
	 *            |
	 *            --- page_fault
	 *                malloc
	 *                main
	 *
	 *   10.00%    10.00%     bash  bash            [.] xmalloc
	 *            |
	 *            --- xmalloc
	 *                malloc
	 *                xmalloc     <--- NOTE: there's a cycle
	 *                malloc
	 *                xmalloc
	 *                main
	 *
	 *   10.00%     0.00%     perf  [kernel]        [k] sys_perf_event_open
	 *            |
	 *            --- sys_perf_event_open
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  [kernel]        [k] page_fault
	 *            |
	 *            --- page_fault
	 *                sys_perf_event_open
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  [kernel]        [k] schedule
	 *            |
	 *            --- schedule
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  libc            [.] free
	 *            |
	 *            --- free
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 *   10.00%    10.00%     perf  libc            [.] malloc
	 *            |
	 *            --- malloc
	 *                cmd_record
	 *                run_command
	 *                main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			2, { { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct evsel *evsel;
	struct evlist *evlist = evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_event(evlist, "cpu-clock");
	if (err)
		goto out;
	err = TEST_FAIL;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	evlist__delete(evlist);
	machines__exit(&machines);
	put_fake_samples();

	return err;
}

DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);
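/*
 * Usage sketch (assuming a perf binary built from this tree): perf test
 * selects suites by number or by a substring of their description, so this
 * one can be run verbosely with something like:
 *
 *   $ perf test -v "Cumulate child hist entries"
 */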