// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-events.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>

struct sample {
        u32 cpu;
        u32 pid;
        u64 ip;
        struct thread *thread;
        struct map *map;
        struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
        /* perf [kernel] schedule() */
        { .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
        /* perf [perf]   main() */
        { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
        /* perf [perf]   cmd_record() */
        { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
        /* perf [libc]   malloc() */
        { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
        /* perf [libc]   free() */
        { .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
        /* perf [perf]   main() */
        { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
        /* perf [kernel] page_fault() */
        { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
        /* bash [bash]   main() */
        { .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
        /* bash [bash]   xmalloc() */
        { .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
        /* bash [kernel] page_fault() */
        { .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};

static int add_hist_entries(struct hists *hists, struct machine *machine)
{
        struct addr_location al;
        struct evsel *evsel = hists_to_evsel(hists);
        struct perf_sample sample = { .period = 100, };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
                struct hist_entry_iter iter = {
                        .evsel = evsel,
                        .sample = &sample,
                        .ops = &hist_iter_normal,
                        .hide_unresolved = false,
                };

                sample.cpumode = PERF_RECORD_MISC_USER;
                sample.cpu = fake_samples[i].cpu;
                sample.pid = fake_samples[i].pid;
                sample.tid = fake_samples[i].pid;
                sample.ip = fake_samples[i].ip;

                if (machine__resolve(machine, &al, &sample) < 0)
                        goto out;

                if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
                                         NULL) < 0) {
                        addr_location__put(&al);
                        goto out;
                }

                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
                fake_samples[i].sym = al.sym;
        }

        return TEST_OK;

out:
        pr_debug("Not enough memory for adding a hist entry\n");
        return TEST_FAIL;
}

static void del_hist_entries(struct hists *hists)
{
        struct hist_entry *he;
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;
        struct rb_node *node;

        if (hists__has(hists, need_collapse))
                root_in = &hists->entries_collapsed;
        else
                root_in = hists->entries_in;

        root_out = &hists->entries;

        while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
                node = rb_first_cached(root_out);

                he = rb_entry(node, struct hist_entry, rb_node);
                rb_erase_cached(node, root_out);
                rb_erase_cached(&he->rb_node_in, root_in);
                hist_entry__delete(he);
        }
}
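
/*
 * Each test case below picks a field_order/sort_order pair, feeds the fake
 * samples in via add_hist_entries() (every sample carries a period of 100,
 * so a single sample shows up as 10% overhead in the expected output),
 * resorts the hists and then walks hists->entries in output order, checking
 * every entry with the accessor macros below.  del_hist_entries() frees the
 * entries again between test cases.
 */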

typedef int (*test_fn_t)(struct evsel *, struct machine *);

#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)

/* default sort keys (no field) */
static int test1(struct evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        struct rb_root_cached *root;
        struct rb_node *node;

        field_order = NULL;
        sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */

        setup_sorting(NULL);

        /*
         * expected output:
         *
         * Overhead  Command  Shared Object  Symbol
         * ========  =======  =============  ==============
         *   20.00%     perf  perf           [.] main
         *   10.00%     bash  [kernel]       [k] page_fault
         *   10.00%     bash  bash           [.] main
         *   10.00%     bash  bash           [.] xmalloc
         *   10.00%     perf  [kernel]       [k] page_fault
         *   10.00%     perf  [kernel]       [k] schedule
         *   10.00%     perf  libc           [.] free
         *   10.00%     perf  libc           [.] malloc
         *   10.00%     perf  perf           [.] cmd_record
         */
        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        hists__collapse_resort(hists, NULL);
        perf_evsel__output_resort(evsel, NULL);

        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
                print_hists_out(hists);
        }

        root = &hists->entries;
        node = rb_first_cached(root);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        !strcmp(SYM(he), "main") && he->stat.period == 200);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "page_fault") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
                        !strcmp(SYM(he), "main") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
                        !strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "page_fault") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "schedule") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
                        !strcmp(SYM(he), "free") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
                        !strcmp(SYM(he), "malloc") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        !strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* mixed fields and sort keys */
static int test2(struct evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        struct rb_root_cached *root;
        struct rb_node *node;

        field_order = "overhead,cpu";
        sort_order = "pid";

        setup_sorting(NULL);

        /*
         * expected output:
         *
         * Overhead  CPU  Command:  Pid
         * ========  ===  =============
         *   30.00%    1  perf   :  100
         *   10.00%    0  perf   :  100
         *   10.00%    2  perf   :  100
         *   20.00%    2  perf   :  200
         *   10.00%    0  bash   :  300
         *   10.00%    1  bash   :  300
         *   10.00%    3  bash   :  300
         */
        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        hists__collapse_resort(hists, NULL);
        perf_evsel__output_resort(evsel, NULL);

        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
                print_hists_out(hists);
        }

        root = &hists->entries;
        node = rb_first_cached(root);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* fields only (no sort key) */
static int test3(struct evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        struct rb_root_cached *root;
        struct rb_node *node;

        field_order = "comm,overhead,dso";
        sort_order = NULL;

        setup_sorting(NULL);

        /*
         * expected output:
         *
         * Command  Overhead  Shared Object
         * =======  ========  =============
         *    bash    20.00%  bash
         *    bash    10.00%  [kernel]
         *    perf    30.00%  perf
         *    perf    20.00%  [kernel]
         *    perf    20.00%  libc
         */
        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        hists__collapse_resort(hists, NULL);
        perf_evsel__output_resort(evsel, NULL);

        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
                print_hists_out(hists);
        }

        root = &hists->entries;
        node = rb_first_cached(root);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
                        he->stat.period == 200);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
                        he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        he->stat.period == 300);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
                        he->stat.period == 200);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
                        he->stat.period == 200);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* handle duplicate 'dso' field */
static int test4(struct evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        struct rb_root_cached *root;
        struct rb_node *node;

        field_order = "dso,sym,comm,overhead,dso";
        sort_order = "sym";

        setup_sorting(NULL);

        /*
         * expected output:
         *
         * Shared Object      Symbol      Command  Overhead
         * =============  ==============  =======  ========
         *          perf  [.] cmd_record     perf    10.00%
         *          libc  [.] free           perf    10.00%
         *          bash  [.] main           bash    10.00%
         *          perf  [.] main           perf    20.00%
         *          libc  [.] malloc         perf    10.00%
         *      [kernel]  [k] page_fault     bash    10.00%
         *      [kernel]  [k] page_fault     perf    10.00%
         *      [kernel]  [k] schedule       perf    10.00%
         *          bash  [.] xmalloc        bash    10.00%
         */
        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        hists__collapse_resort(hists, NULL);
        perf_evsel__output_resort(evsel, NULL);

        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
                print_hists_out(hists);
        }

        root = &hists->entries;
        node = rb_first_cached(root);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
                        !strcmp(COMM(he), "bash") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 200);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
                        !strcmp(COMM(he), "bash") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
                        !strcmp(COMM(he), "perf") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
                        !strcmp(COMM(he), "bash") && he->stat.period == 100);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

/* full sort keys w/o overhead field */
static int test5(struct evsel *evsel, struct machine *machine)
{
        int err;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he;
        struct rb_root_cached *root;
        struct rb_node *node;

        field_order = "cpu,pid,comm,dso,sym";
        sort_order = "dso,pid";

        setup_sorting(NULL);

        /*
         * expected output:
         *
         * CPU  Command:  Pid  Command  Shared Object          Symbol
         * ===  =============  =======  =============  ==============
         *   0     perf:  100     perf       [kernel]  [k] schedule
         *   2     perf:  200     perf       [kernel]  [k] page_fault
         *   1     bash:  300     bash       [kernel]  [k] page_fault
         *   0     bash:  300     bash           bash  [.] xmalloc
         *   3     bash:  300     bash           bash  [.] main
         *   1     perf:  100     perf           libc  [.] malloc
         *   2     perf:  100     perf           libc  [.] free
         *   1     perf:  100     perf           perf  [.] cmd_record
         *   1     perf:  100     perf           perf  [.] main
         *   2     perf:  200     perf           perf  [.] main
         */
        err = add_hist_entries(hists, machine);
        if (err < 0)
                goto out;

        hists__collapse_resort(hists, NULL);
        perf_evsel__output_resort(evsel, NULL);

        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
                print_hists_out(hists);
        }

        root = &hists->entries;
        node = rb_first_cached(root);
        he = rb_entry(node, struct hist_entry, rb_node);

        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 0 && PID(he) == 100 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "schedule") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 2 && PID(he) == 200 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "page_fault") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 1 && PID(he) == 300 &&
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
                        !strcmp(SYM(he), "page_fault") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 0 && PID(he) == 300 &&
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
                        !strcmp(SYM(he), "xmalloc") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 3 && PID(he) == 300 &&
                        !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
                        !strcmp(SYM(he), "main") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 1 && PID(he) == 100 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
                        !strcmp(SYM(he), "malloc") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 2 && PID(he) == 100 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
                        !strcmp(SYM(he), "free") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 1 && PID(he) == 100 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        !strcmp(SYM(he), "cmd_record") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 1 && PID(he) == 100 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        !strcmp(SYM(he), "main") && he->stat.period == 100);

        node = rb_next(node);
        he = rb_entry(node, struct hist_entry, rb_node);
        TEST_ASSERT_VAL("Invalid hist entry",
                        CPU(he) == 2 && PID(he) == 200 &&
                        !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
                        !strcmp(SYM(he), "main") && he->stat.period == 100);

out:
        del_hist_entries(hists);
        reset_output_field();
        return err;
}

int test__hists_output(struct test *test __maybe_unused, int subtest __maybe_unused)
{
        int err = TEST_FAIL;
        struct machines machines;
        struct machine *machine;
        struct evsel *evsel;
        struct evlist *evlist = evlist__new();
        size_t i;
        test_fn_t testcases[] = {
                test1,
                test2,
                test3,
                test4,
                test5,
        };

        TEST_ASSERT_VAL("No memory", evlist);

        err = parse_events(evlist, "cpu-clock", NULL);
        if (err)
                goto out;
        err = TEST_FAIL;

        machines__init(&machines);

        /* setup threads/dso/map/symbols also */
        machine = setup_fake_machine(&machines);
        if (!machine)
                goto out;

        if (verbose > 1)
                machine__fprintf(machine, stderr);

        evsel = evlist__first(evlist);

        for (i = 0; i < ARRAY_SIZE(testcases); i++) {
                err = testcases[i](evsel, machine);
                if (err < 0)
                        break;
        }

out:
        /* tear down everything */
        evlist__delete(evlist);
        machines__exit(&machines);

        return err;
}