// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>

/*
 * One synthetic sample: the (pid, ip) pair fed into the hist code, plus
 * the resolved location (thread/map/sym) recorded after machine__resolve().
 */
struct sample {
	u32 pid;		/* fake pid the sample is attributed to */
	u64 ip;			/* fake instruction pointer to resolve */
	struct thread *thread;	/* filled in by add_hist_entries() */
	struct map *map;	/* filled in by add_hist_entries() */
	struct symbol *sym;	/* filled in by add_hist_entries() */
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

/*
 * Will be cast to struct ip_callchain which has all 64 bit entries
 * of nr and ips[].
 */
static u64 fake_callchains[][10] = {
	/* schedule => run_command => main */
	{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* cmd_record => run_command => main */
	{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* malloc => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* free => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* page_fault => sys_perf_event_open => run_command => main */
	{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
	     FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_BASH_MAIN, },
	/* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
	{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
	     FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
	/* page_fault => malloc => main */
	{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};

/*
 * Feed every fake sample (with its matching fake callchain) into @hists,
 * using the cumulative iterator when symbol_conf.cumulate_callchain is set
 * and the normal one otherwise.  The resolved thread/map/sym of each sample
 * is stored back into fake_samples[] for later inspection.
 * Returns TEST_OK or TEST_FAIL.
 */
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 1000, };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.hide_unresolved = false,
		};

		/* iterator choice is what distinguishes test1/2 from test3/4 */
		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;
		/* fake_callchains[i][0] is nr, the rest are ips[] */
		sample.callchain = (struct ip_callchain *)fake_callchains[i];

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}

/*
 * Remove and free all hist entries so each sub-test starts from an empty
 * hists; entries sit on two rbtrees (in/out), so erase from both.
 */
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root *root_in;
	struct rb_root *root_out;
	struct rb_node *node;

	if (hists__has(hists, need_collapse))
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(root_out)) {
		node = rb_first(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase(node, root_out);
		rb_erase(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

/* field accessors for hist entries / callchain list nodes */
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)			/* NOTE: unused in this test */
#define PID(he)   (he->thread->tid)		/* NOTE: unused in this test */
#define DEPTH(he) (he->callchain->max_depth)	/* NOTE: unused in this test */
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

/* expected children/self periods and location of one output hist entry */
struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

/* expected (dso, sym) sequence of the first callchain branch of an entry */
struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};

/*
 * Resort @hists and walk the output rbtree, comparing every entry (and,
 * when symbol_conf.use_callchain is set, the first branch of its callchain)
 * against the expectations.  Returns 0 on success; TEST_ASSERT_VAL()
 * returns TEST_FAIL from inside on any mismatch.
 */
static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/*
	 * adding and deleting hist entries must be done outside of this
	 * function since TEST_ASSERT_VAL() returns in case of failure.
	 */
	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(hists_to_evsel(hists), NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/* check callchain entries (root is reused; loop only needs node) */
		root = &he->callchain->node.rb_root;

		TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple child nodes properly */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}

/* NO callchain + NO children */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + NO children */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* NO callchain + children */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *   30.00%    10.00%     bash  bash           [.] main
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *   10.00%    10.00%     perf  libc           [.] free
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;
	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + children */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *              |
	 *              --- run_command
	 *                  main
	 *
	 *   30.00%    10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                 |
	 *                 |--50.00%-- xmalloc
	 *                 |           main
	 *                  --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 *              |
	 *              --- sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			2, { { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/*
 * Test entry point: build one fake machine + "cpu-clock" evsel and run the
 * four callchain/children combinations against it.  Stops at the first
 * failing sub-test and returns its error.
 */
int test__hists_cumulate(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = TEST_FAIL;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = perf_evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}