1 #include "perf.h" 2 #include "util/debug.h" 3 #include "util/symbol.h" 4 #include "util/sort.h" 5 #include "util/evsel.h" 6 #include "util/evlist.h" 7 #include "util/machine.h" 8 #include "util/thread.h" 9 #include "util/parse-events.h" 10 #include "tests/tests.h" 11 #include "tests/hists_common.h" 12 13 struct sample { 14 u32 pid; 15 u64 ip; 16 struct thread *thread; 17 struct map *map; 18 struct symbol *sym; 19 }; 20 21 /* For the numbers, see hists_common.c */ 22 static struct sample fake_samples[] = { 23 /* perf [kernel] schedule() */ 24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, }, 25 /* perf [perf] main() */ 26 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, }, 27 /* perf [perf] cmd_record() */ 28 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, }, 29 /* perf [libc] malloc() */ 30 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, }, 31 /* perf [libc] free() */ 32 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, }, 33 /* perf [perf] main() */ 34 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, }, 35 /* perf [kernel] page_fault() */ 36 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, 37 /* bash [bash] main() */ 38 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, }, 39 /* bash [bash] xmalloc() */ 40 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, }, 41 /* bash [kernel] page_fault() */ 42 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, }, 43 }; 44 45 /* 46 * Will be casted to struct ip_callchain which has all 64 bit entries 47 * of nr and ips[]. 48 */ 49 static u64 fake_callchains[][10] = { 50 /* schedule => run_command => main */ 51 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 52 /* main */ 53 { 1, FAKE_IP_PERF_MAIN, }, 54 /* cmd_record => run_command => main */ 55 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 56 /* malloc => cmd_record => run_command => main */ 57 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, 58 FAKE_IP_PERF_MAIN, }, 59 /* free => cmd_record => run_command => main */ 60 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, 61 FAKE_IP_PERF_MAIN, }, 62 /* main */ 63 { 1, FAKE_IP_PERF_MAIN, }, 64 /* page_fault => sys_perf_event_open => run_command => main */ 65 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, 66 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 67 /* main */ 68 { 1, FAKE_IP_BASH_MAIN, }, 69 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */ 70 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, 71 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, }, 72 /* page_fault => malloc => main */ 73 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, }, 74 }; 75 76 static int add_hist_entries(struct hists *hists, struct machine *machine) 77 { 78 struct addr_location al; 79 struct perf_evsel *evsel = hists_to_evsel(hists); 80 struct perf_sample sample = { .period = 1000, }; 81 size_t i; 82 83 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { 84 const union perf_event event = { 85 .header = { 86 .misc = PERF_RECORD_MISC_USER, 87 }, 88 }; 89 struct hist_entry_iter iter = { 90 .evsel = evsel, 91 .sample = &sample, 92 .hide_unresolved = false, 93 }; 94 95 if (symbol_conf.cumulate_callchain) 96 iter.ops = &hist_iter_cumulative; 97 else 98 iter.ops = &hist_iter_normal; 99 100 sample.pid = fake_samples[i].pid; 101 sample.tid = fake_samples[i].pid; 102 sample.ip = fake_samples[i].ip; 103 sample.callchain = (struct ip_callchain 
		if (perf_event__preprocess_sample(&event, machine, &al,
						  &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
					 NULL) < 0) {
			addr_location__put(&al);
			goto out;
		}

		fake_samples[i].thread = al.thread;
		fake_samples[i].map = al.map;
		fake_samples[i].sym = al.sym;
	}

	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return TEST_FAIL;
}

static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root *root_in;
	struct rb_root *root_out;
	struct rb_node *node;

	if (sort__need_collapse)
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(root_out)) {
		node = rb_first(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase(node, root_out);
		rb_erase(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};

static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/*
	 * adding and deleting hist entries must be done outside of this
	 * function since TEST_ASSERT_VAL() returns in case of failure.
	 */
	hists__collapse_resort(hists, NULL);
	hists__output_resort(hists, NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/* check callchain entries */
		root = &he->callchain->node.rb_root;
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple child nodes properly */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}

/* NO callchain + NO children */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;

	setup_sorting();
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + NO children */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc   <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;

	setup_sorting();
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* NO callchain + children */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output (same order as the expected[] array below):
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *   30.00%    10.00%     bash  bash           [.] main
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *   10.00%    10.00%     perf  libc           [.] free
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;

	setup_sorting();
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + children */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *              |
	 *              --- run_command
	 *                  main
	 *
	 *   30.00%    10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  |
	 *                  |--50.00%-- xmalloc
	 *                  |           main
	 *                   --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc   <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 *              |
	 *              --- sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			2, { { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;

	setup_sorting();
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

int test__hists_cumulate(void)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = perf_evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}