#include "perf.h"
#include "util/debug.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"

/*
 * One fake sample.  pid/ip are the inputs; thread/map/sym are filled in
 * by add_hist_entries() from the resolved addr_location so later code
 * can refer back to the resolution result.
 */
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

/*
 * Will be cast to struct ip_callchain which has all 64 bit entries
 * of nr and ips[].  (Entry [0] of each row is the 'nr' depth count.)
48 */ 49 static u64 fake_callchains[][10] = { 50 /* schedule => run_command => main */ 51 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 52 /* main */ 53 { 1, FAKE_IP_PERF_MAIN, }, 54 /* cmd_record => run_command => main */ 55 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 56 /* malloc => cmd_record => run_command => main */ 57 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, 58 FAKE_IP_PERF_MAIN, }, 59 /* free => cmd_record => run_command => main */ 60 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, 61 FAKE_IP_PERF_MAIN, }, 62 /* main */ 63 { 1, FAKE_IP_PERF_MAIN, }, 64 /* page_fault => sys_perf_event_open => run_command => main */ 65 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, 66 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, }, 67 /* main */ 68 { 1, FAKE_IP_BASH_MAIN, }, 69 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */ 70 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, 71 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, }, 72 /* page_fault => malloc => main */ 73 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, }, 74 }; 75 76 static int add_hist_entries(struct hists *hists, struct machine *machine) 77 { 78 struct addr_location al; 79 struct perf_evsel *evsel = hists_to_evsel(hists); 80 struct perf_sample sample = { .period = 1000, }; 81 size_t i; 82 83 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) { 84 struct hist_entry_iter iter = { 85 .evsel = evsel, 86 .sample = &sample, 87 .hide_unresolved = false, 88 }; 89 90 if (symbol_conf.cumulate_callchain) 91 iter.ops = &hist_iter_cumulative; 92 else 93 iter.ops = &hist_iter_normal; 94 95 sample.cpumode = PERF_RECORD_MISC_USER; 96 sample.pid = fake_samples[i].pid; 97 sample.tid = fake_samples[i].pid; 98 sample.ip = fake_samples[i].ip; 99 sample.callchain = (struct ip_callchain *)fake_callchains[i]; 100 101 
if (machine__resolve(machine, &al, &sample) < 0) 102 goto out; 103 104 if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack, 105 NULL) < 0) { 106 addr_location__put(&al); 107 goto out; 108 } 109 110 fake_samples[i].thread = al.thread; 111 fake_samples[i].map = al.map; 112 fake_samples[i].sym = al.sym; 113 } 114 115 return TEST_OK; 116 117 out: 118 pr_debug("Not enough memory for adding a hist entry\n"); 119 return TEST_FAIL; 120 } 121 122 static void del_hist_entries(struct hists *hists) 123 { 124 struct hist_entry *he; 125 struct rb_root *root_in; 126 struct rb_root *root_out; 127 struct rb_node *node; 128 129 if (hists__has(hists, need_collapse)) 130 root_in = &hists->entries_collapsed; 131 else 132 root_in = hists->entries_in; 133 134 root_out = &hists->entries; 135 136 while (!RB_EMPTY_ROOT(root_out)) { 137 node = rb_first(root_out); 138 139 he = rb_entry(node, struct hist_entry, rb_node); 140 rb_erase(node, root_out); 141 rb_erase(&he->rb_node_in, root_in); 142 hist_entry__delete(he); 143 } 144 } 145 146 typedef int (*test_fn_t)(struct perf_evsel *, struct machine *); 147 148 #define COMM(he) (thread__comm_str(he->thread)) 149 #define DSO(he) (he->ms.map->dso->short_name) 150 #define SYM(he) (he->ms.sym->name) 151 #define CPU(he) (he->cpu) 152 #define PID(he) (he->thread->tid) 153 #define DEPTH(he) (he->callchain->max_depth) 154 #define CDSO(cl) (cl->ms.map->dso->short_name) 155 #define CSYM(cl) (cl->ms.sym->name) 156 157 struct result { 158 u64 children; 159 u64 self; 160 const char *comm; 161 const char *dso; 162 const char *sym; 163 }; 164 165 struct callchain_result { 166 u64 nr; 167 struct { 168 const char *dso; 169 const char *sym; 170 } node[10]; 171 }; 172 173 static int do_test(struct hists *hists, struct result *expected, size_t nr_expected, 174 struct callchain_result *expected_callchain, size_t nr_callchain) 175 { 176 char buf[32]; 177 size_t i, c; 178 struct hist_entry *he; 179 struct rb_root *root; 180 struct rb_node *node; 181 struct 
callchain_node *cnode; 182 struct callchain_list *clist; 183 184 /* 185 * adding and deleting hist entries must be done outside of this 186 * function since TEST_ASSERT_VAL() returns in case of failure. 187 */ 188 hists__collapse_resort(hists, NULL); 189 perf_evsel__output_resort(hists_to_evsel(hists), NULL); 190 191 if (verbose > 2) { 192 pr_info("use callchain: %d, cumulate callchain: %d\n", 193 symbol_conf.use_callchain, 194 symbol_conf.cumulate_callchain); 195 print_hists_out(hists); 196 } 197 198 root = &hists->entries; 199 for (node = rb_first(root), i = 0; 200 node && (he = rb_entry(node, struct hist_entry, rb_node)); 201 node = rb_next(node), i++) { 202 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i); 203 204 TEST_ASSERT_VAL("Incorrect number of hist entry", 205 i < nr_expected); 206 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self && 207 !strcmp(COMM(he), expected[i].comm) && 208 !strcmp(DSO(he), expected[i].dso) && 209 !strcmp(SYM(he), expected[i].sym)); 210 211 if (symbol_conf.cumulate_callchain) 212 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children); 213 214 if (!symbol_conf.use_callchain) 215 continue; 216 217 /* check callchain entries */ 218 root = &he->callchain->node.rb_root; 219 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); 220 221 c = 0; 222 list_for_each_entry(clist, &cnode->val, list) { 223 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c); 224 225 TEST_ASSERT_VAL("Incorrect number of callchain entry", 226 c < expected_callchain[i].nr); 227 TEST_ASSERT_VAL(buf, 228 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) && 229 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym)); 230 c++; 231 } 232 /* TODO: handle multiple child nodes properly */ 233 TEST_ASSERT_VAL("Incorrect number of callchain entry", 234 c <= expected_callchain[i].nr); 235 } 236 TEST_ASSERT_VAL("Incorrect number of hist entry", 237 i == nr_expected); 238 TEST_ASSERT_VAL("Incorrect number of 
callchain entry", 239 !symbol_conf.use_callchain || nr_expected == nr_callchain); 240 return 0; 241 } 242 243 /* NO callchain + NO children */ 244 static int test1(struct perf_evsel *evsel, struct machine *machine) 245 { 246 int err; 247 struct hists *hists = evsel__hists(evsel); 248 /* 249 * expected output: 250 * 251 * Overhead Command Shared Object Symbol 252 * ======== ======= ============= ============== 253 * 20.00% perf perf [.] main 254 * 10.00% bash [kernel] [k] page_fault 255 * 10.00% bash bash [.] main 256 * 10.00% bash bash [.] xmalloc 257 * 10.00% perf [kernel] [k] page_fault 258 * 10.00% perf [kernel] [k] schedule 259 * 10.00% perf libc [.] free 260 * 10.00% perf libc [.] malloc 261 * 10.00% perf perf [.] cmd_record 262 */ 263 struct result expected[] = { 264 { 0, 2000, "perf", "perf", "main" }, 265 { 0, 1000, "bash", "[kernel]", "page_fault" }, 266 { 0, 1000, "bash", "bash", "main" }, 267 { 0, 1000, "bash", "bash", "xmalloc" }, 268 { 0, 1000, "perf", "[kernel]", "page_fault" }, 269 { 0, 1000, "perf", "[kernel]", "schedule" }, 270 { 0, 1000, "perf", "libc", "free" }, 271 { 0, 1000, "perf", "libc", "malloc" }, 272 { 0, 1000, "perf", "perf", "cmd_record" }, 273 }; 274 275 symbol_conf.use_callchain = false; 276 symbol_conf.cumulate_callchain = false; 277 perf_evsel__reset_sample_bit(evsel, CALLCHAIN); 278 279 setup_sorting(NULL); 280 callchain_register_param(&callchain_param); 281 282 err = add_hist_entries(hists, machine); 283 if (err < 0) 284 goto out; 285 286 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); 287 288 out: 289 del_hist_entries(hists); 290 reset_output_field(); 291 return err; 292 } 293 294 /* callcain + NO children */ 295 static int test2(struct perf_evsel *evsel, struct machine *machine) 296 { 297 int err; 298 struct hists *hists = evsel__hists(evsel); 299 /* 300 * expected output: 301 * 302 * Overhead Command Shared Object Symbol 303 * ======== ======= ============= ============== 304 * 20.00% perf perf [.] 
main 305 * | 306 * --- main 307 * 308 * 10.00% bash [kernel] [k] page_fault 309 * | 310 * --- page_fault 311 * malloc 312 * main 313 * 314 * 10.00% bash bash [.] main 315 * | 316 * --- main 317 * 318 * 10.00% bash bash [.] xmalloc 319 * | 320 * --- xmalloc 321 * malloc 322 * xmalloc <--- NOTE: there's a cycle 323 * malloc 324 * xmalloc 325 * main 326 * 327 * 10.00% perf [kernel] [k] page_fault 328 * | 329 * --- page_fault 330 * sys_perf_event_open 331 * run_command 332 * main 333 * 334 * 10.00% perf [kernel] [k] schedule 335 * | 336 * --- schedule 337 * run_command 338 * main 339 * 340 * 10.00% perf libc [.] free 341 * | 342 * --- free 343 * cmd_record 344 * run_command 345 * main 346 * 347 * 10.00% perf libc [.] malloc 348 * | 349 * --- malloc 350 * cmd_record 351 * run_command 352 * main 353 * 354 * 10.00% perf perf [.] cmd_record 355 * | 356 * --- cmd_record 357 * run_command 358 * main 359 * 360 */ 361 struct result expected[] = { 362 { 0, 2000, "perf", "perf", "main" }, 363 { 0, 1000, "bash", "[kernel]", "page_fault" }, 364 { 0, 1000, "bash", "bash", "main" }, 365 { 0, 1000, "bash", "bash", "xmalloc" }, 366 { 0, 1000, "perf", "[kernel]", "page_fault" }, 367 { 0, 1000, "perf", "[kernel]", "schedule" }, 368 { 0, 1000, "perf", "libc", "free" }, 369 { 0, 1000, "perf", "libc", "malloc" }, 370 { 0, 1000, "perf", "perf", "cmd_record" }, 371 }; 372 struct callchain_result expected_callchain[] = { 373 { 374 1, { { "perf", "main" }, }, 375 }, 376 { 377 3, { { "[kernel]", "page_fault" }, 378 { "libc", "malloc" }, 379 { "bash", "main" }, }, 380 }, 381 { 382 1, { { "bash", "main" }, }, 383 }, 384 { 385 6, { { "bash", "xmalloc" }, 386 { "libc", "malloc" }, 387 { "bash", "xmalloc" }, 388 { "libc", "malloc" }, 389 { "bash", "xmalloc" }, 390 { "bash", "main" }, }, 391 }, 392 { 393 4, { { "[kernel]", "page_fault" }, 394 { "[kernel]", "sys_perf_event_open" }, 395 { "perf", "run_command" }, 396 { "perf", "main" }, }, 397 }, 398 { 399 3, { { "[kernel]", "schedule" }, 400 { "perf", 
"run_command" }, 401 { "perf", "main" }, }, 402 }, 403 { 404 4, { { "libc", "free" }, 405 { "perf", "cmd_record" }, 406 { "perf", "run_command" }, 407 { "perf", "main" }, }, 408 }, 409 { 410 4, { { "libc", "malloc" }, 411 { "perf", "cmd_record" }, 412 { "perf", "run_command" }, 413 { "perf", "main" }, }, 414 }, 415 { 416 3, { { "perf", "cmd_record" }, 417 { "perf", "run_command" }, 418 { "perf", "main" }, }, 419 }, 420 }; 421 422 symbol_conf.use_callchain = true; 423 symbol_conf.cumulate_callchain = false; 424 perf_evsel__set_sample_bit(evsel, CALLCHAIN); 425 426 setup_sorting(NULL); 427 callchain_register_param(&callchain_param); 428 429 err = add_hist_entries(hists, machine); 430 if (err < 0) 431 goto out; 432 433 err = do_test(hists, expected, ARRAY_SIZE(expected), 434 expected_callchain, ARRAY_SIZE(expected_callchain)); 435 436 out: 437 del_hist_entries(hists); 438 reset_output_field(); 439 return err; 440 } 441 442 /* NO callchain + children */ 443 static int test3(struct perf_evsel *evsel, struct machine *machine) 444 { 445 int err; 446 struct hists *hists = evsel__hists(evsel); 447 /* 448 * expected output: 449 * 450 * Children Self Command Shared Object Symbol 451 * ======== ======== ======= ============= ======================= 452 * 70.00% 20.00% perf perf [.] main 453 * 50.00% 0.00% perf perf [.] run_command 454 * 30.00% 10.00% bash bash [.] main 455 * 30.00% 10.00% perf perf [.] cmd_record 456 * 20.00% 0.00% bash libc [.] malloc 457 * 10.00% 10.00% bash [kernel] [k] page_fault 458 * 10.00% 10.00% bash bash [.] xmalloc 459 * 10.00% 10.00% perf [kernel] [k] page_fault 460 * 10.00% 10.00% perf libc [.] malloc 461 * 10.00% 10.00% perf [kernel] [k] schedule 462 * 10.00% 10.00% perf libc [.] 
free 463 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 464 */ 465 struct result expected[] = { 466 { 7000, 2000, "perf", "perf", "main" }, 467 { 5000, 0, "perf", "perf", "run_command" }, 468 { 3000, 1000, "bash", "bash", "main" }, 469 { 3000, 1000, "perf", "perf", "cmd_record" }, 470 { 2000, 0, "bash", "libc", "malloc" }, 471 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 472 { 1000, 1000, "bash", "bash", "xmalloc" }, 473 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 474 { 1000, 1000, "perf", "[kernel]", "schedule" }, 475 { 1000, 1000, "perf", "libc", "free" }, 476 { 1000, 1000, "perf", "libc", "malloc" }, 477 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 478 }; 479 480 symbol_conf.use_callchain = false; 481 symbol_conf.cumulate_callchain = true; 482 perf_evsel__reset_sample_bit(evsel, CALLCHAIN); 483 484 setup_sorting(NULL); 485 callchain_register_param(&callchain_param); 486 487 err = add_hist_entries(hists, machine); 488 if (err < 0) 489 goto out; 490 491 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0); 492 493 out: 494 del_hist_entries(hists); 495 reset_output_field(); 496 return err; 497 } 498 499 /* callchain + children */ 500 static int test4(struct perf_evsel *evsel, struct machine *machine) 501 { 502 int err; 503 struct hists *hists = evsel__hists(evsel); 504 /* 505 * expected output: 506 * 507 * Children Self Command Shared Object Symbol 508 * ======== ======== ======= ============= ======================= 509 * 70.00% 20.00% perf perf [.] main 510 * | 511 * --- main 512 * 513 * 50.00% 0.00% perf perf [.] run_command 514 * | 515 * --- run_command 516 * main 517 * 518 * 30.00% 10.00% bash bash [.] main 519 * | 520 * --- main 521 * 522 * 30.00% 10.00% perf perf [.] cmd_record 523 * | 524 * --- cmd_record 525 * run_command 526 * main 527 * 528 * 20.00% 0.00% bash libc [.] 
malloc 529 * | 530 * --- malloc 531 * | 532 * |--50.00%-- xmalloc 533 * | main 534 * --50.00%-- main 535 * 536 * 10.00% 10.00% bash [kernel] [k] page_fault 537 * | 538 * --- page_fault 539 * malloc 540 * main 541 * 542 * 10.00% 10.00% bash bash [.] xmalloc 543 * | 544 * --- xmalloc 545 * malloc 546 * xmalloc <--- NOTE: there's a cycle 547 * malloc 548 * xmalloc 549 * main 550 * 551 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open 552 * | 553 * --- sys_perf_event_open 554 * run_command 555 * main 556 * 557 * 10.00% 10.00% perf [kernel] [k] page_fault 558 * | 559 * --- page_fault 560 * sys_perf_event_open 561 * run_command 562 * main 563 * 564 * 10.00% 10.00% perf [kernel] [k] schedule 565 * | 566 * --- schedule 567 * run_command 568 * main 569 * 570 * 10.00% 10.00% perf libc [.] free 571 * | 572 * --- free 573 * cmd_record 574 * run_command 575 * main 576 * 577 * 10.00% 10.00% perf libc [.] malloc 578 * | 579 * --- malloc 580 * cmd_record 581 * run_command 582 * main 583 * 584 */ 585 struct result expected[] = { 586 { 7000, 2000, "perf", "perf", "main" }, 587 { 5000, 0, "perf", "perf", "run_command" }, 588 { 3000, 1000, "bash", "bash", "main" }, 589 { 3000, 1000, "perf", "perf", "cmd_record" }, 590 { 2000, 0, "bash", "libc", "malloc" }, 591 { 1000, 1000, "bash", "[kernel]", "page_fault" }, 592 { 1000, 1000, "bash", "bash", "xmalloc" }, 593 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, 594 { 1000, 1000, "perf", "[kernel]", "page_fault" }, 595 { 1000, 1000, "perf", "[kernel]", "schedule" }, 596 { 1000, 1000, "perf", "libc", "free" }, 597 { 1000, 1000, "perf", "libc", "malloc" }, 598 }; 599 struct callchain_result expected_callchain[] = { 600 { 601 1, { { "perf", "main" }, }, 602 }, 603 { 604 2, { { "perf", "run_command" }, 605 { "perf", "main" }, }, 606 }, 607 { 608 1, { { "bash", "main" }, }, 609 }, 610 { 611 3, { { "perf", "cmd_record" }, 612 { "perf", "run_command" }, 613 { "perf", "main" }, }, 614 }, 615 { 616 4, { { "libc", "malloc" }, 617 { "bash", 
"xmalloc" }, 618 { "bash", "main" }, 619 { "bash", "main" }, }, 620 }, 621 { 622 3, { { "[kernel]", "page_fault" }, 623 { "libc", "malloc" }, 624 { "bash", "main" }, }, 625 }, 626 { 627 6, { { "bash", "xmalloc" }, 628 { "libc", "malloc" }, 629 { "bash", "xmalloc" }, 630 { "libc", "malloc" }, 631 { "bash", "xmalloc" }, 632 { "bash", "main" }, }, 633 }, 634 { 635 3, { { "[kernel]", "sys_perf_event_open" }, 636 { "perf", "run_command" }, 637 { "perf", "main" }, }, 638 }, 639 { 640 4, { { "[kernel]", "page_fault" }, 641 { "[kernel]", "sys_perf_event_open" }, 642 { "perf", "run_command" }, 643 { "perf", "main" }, }, 644 }, 645 { 646 3, { { "[kernel]", "schedule" }, 647 { "perf", "run_command" }, 648 { "perf", "main" }, }, 649 }, 650 { 651 4, { { "libc", "free" }, 652 { "perf", "cmd_record" }, 653 { "perf", "run_command" }, 654 { "perf", "main" }, }, 655 }, 656 { 657 4, { { "libc", "malloc" }, 658 { "perf", "cmd_record" }, 659 { "perf", "run_command" }, 660 { "perf", "main" }, }, 661 }, 662 }; 663 664 symbol_conf.use_callchain = true; 665 symbol_conf.cumulate_callchain = true; 666 perf_evsel__set_sample_bit(evsel, CALLCHAIN); 667 668 setup_sorting(NULL); 669 callchain_register_param(&callchain_param); 670 671 err = add_hist_entries(hists, machine); 672 if (err < 0) 673 goto out; 674 675 err = do_test(hists, expected, ARRAY_SIZE(expected), 676 expected_callchain, ARRAY_SIZE(expected_callchain)); 677 678 out: 679 del_hist_entries(hists); 680 reset_output_field(); 681 return err; 682 } 683 684 int test__hists_cumulate(int subtest __maybe_unused) 685 { 686 int err = TEST_FAIL; 687 struct machines machines; 688 struct machine *machine; 689 struct perf_evsel *evsel; 690 struct perf_evlist *evlist = perf_evlist__new(); 691 size_t i; 692 test_fn_t testcases[] = { 693 test1, 694 test2, 695 test3, 696 test4, 697 }; 698 699 TEST_ASSERT_VAL("No memory", evlist); 700 701 err = parse_events(evlist, "cpu-clock", NULL); 702 if (err) 703 goto out; 704 err = TEST_FAIL; 705 706 
machines__init(&machines); 707 708 /* setup threads/dso/map/symbols also */ 709 machine = setup_fake_machine(&machines); 710 if (!machine) 711 goto out; 712 713 if (verbose > 1) 714 machine__fprintf(machine, stderr); 715 716 evsel = perf_evlist__first(evlist); 717 718 for (i = 0; i < ARRAY_SIZE(testcases); i++) { 719 err = testcases[i](evsel, machine); 720 if (err < 0) 721 break; 722 } 723 724 out: 725 /* tear down everything */ 726 perf_evlist__delete(evlist); 727 machines__exit(&machines); 728 729 return err; 730 } 731