1 #include "perf.h" 2 #include "tests.h" 3 #include "debug.h" 4 #include "symbol.h" 5 #include "sort.h" 6 #include "evsel.h" 7 #include "evlist.h" 8 #include "machine.h" 9 #include "thread.h" 10 #include "parse-events.h" 11 12 static struct { 13 u32 pid; 14 const char *comm; 15 } fake_threads[] = { 16 { 100, "perf" }, 17 { 200, "perf" }, 18 { 300, "bash" }, 19 }; 20 21 static struct { 22 u32 pid; 23 u64 start; 24 const char *filename; 25 } fake_mmap_info[] = { 26 { 100, 0x40000, "perf" }, 27 { 100, 0x50000, "libc" }, 28 { 100, 0xf0000, "[kernel]" }, 29 { 200, 0x40000, "perf" }, 30 { 200, 0x50000, "libc" }, 31 { 200, 0xf0000, "[kernel]" }, 32 { 300, 0x40000, "bash" }, 33 { 300, 0x50000, "libc" }, 34 { 300, 0xf0000, "[kernel]" }, 35 }; 36 37 struct fake_sym { 38 u64 start; 39 u64 length; 40 const char *name; 41 }; 42 43 static struct fake_sym perf_syms[] = { 44 { 700, 100, "main" }, 45 { 800, 100, "run_command" }, 46 { 900, 100, "cmd_record" }, 47 }; 48 49 static struct fake_sym bash_syms[] = { 50 { 700, 100, "main" }, 51 { 800, 100, "xmalloc" }, 52 { 900, 100, "xfree" }, 53 }; 54 55 static struct fake_sym libc_syms[] = { 56 { 700, 100, "malloc" }, 57 { 800, 100, "free" }, 58 { 900, 100, "realloc" }, 59 }; 60 61 static struct fake_sym kernel_syms[] = { 62 { 700, 100, "schedule" }, 63 { 800, 100, "page_fault" }, 64 { 900, 100, "sys_perf_event_open" }, 65 }; 66 67 static struct { 68 const char *dso_name; 69 struct fake_sym *syms; 70 size_t nr_syms; 71 } fake_symbols[] = { 72 { "perf", perf_syms, ARRAY_SIZE(perf_syms) }, 73 { "bash", bash_syms, ARRAY_SIZE(bash_syms) }, 74 { "libc", libc_syms, ARRAY_SIZE(libc_syms) }, 75 { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) }, 76 }; 77 78 static struct machine *setup_fake_machine(struct machines *machines) 79 { 80 struct machine *machine = machines__find(machines, HOST_KERNEL_ID); 81 size_t i; 82 83 if (machine == NULL) { 84 pr_debug("Not enough memory for machine setup\n"); 85 return NULL; 86 } 87 88 for (i = 0; i < ARRAY_SIZE(fake_threads); i++) { 89 struct thread *thread; 90 91 thread = machine__findnew_thread(machine, fake_threads[i].pid); 92 if (thread == NULL) 93 goto out; 94 95 thread__set_comm(thread, fake_threads[i].comm); 96 } 97 98 for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) { 99 union perf_event fake_mmap_event = { 100 .mmap = { 101 .header = { .misc = PERF_RECORD_MISC_USER, }, 102 .pid = fake_mmap_info[i].pid, 103 .start = fake_mmap_info[i].start, 104 .len = 0x1000ULL, 105 .pgoff = 0ULL, 106 }, 107 }; 108 109 strcpy(fake_mmap_event.mmap.filename, 110 fake_mmap_info[i].filename); 111 112 machine__process_mmap_event(machine, &fake_mmap_event); 113 } 114 115 for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) { 116 size_t k; 117 struct dso *dso; 118 119 dso = __dsos__findnew(&machine->user_dsos, 120 fake_symbols[i].dso_name); 121 if (dso == NULL) 122 goto out; 123 124 /* emulate dso__load() */ 125 dso__set_loaded(dso, MAP__FUNCTION); 126 127 for (k = 0; k < fake_symbols[i].nr_syms; k++) { 128 struct symbol *sym; 129 struct fake_sym *fsym = &fake_symbols[i].syms[k]; 130 131 sym = symbol__new(fsym->start, fsym->length, 132 STB_GLOBAL, fsym->name); 133 if (sym == NULL) 134 goto out; 135 136 symbols__insert(&dso->symbols[MAP__FUNCTION], sym); 137 } 138 } 139 140 return machine; 141 142 out: 143 pr_debug("Not enough memory for machine setup\n"); 144 machine__delete_threads(machine); 145 machine__delete(machine); 146 return NULL; 147 } 148 149 struct sample { 150 u32 pid; 151 u64 ip; 152 struct thread *thread; 153 struct map *map; 154 struct 
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf] main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf] cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash] xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc] malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf] run_command() */
		{ .pid = 100, .ip = 0x40000 + 800, },
		/* perf [libc] malloc() */
		{ .pid = 100, .ip = 0x50000 + 700, },
		/* perf [kernel] page_fault() */
		{ .pid = 100, .ip = 0xf0000 + 800, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = 200, .ip = 0xf0000 + 900, },
		/* bash [libc] free() */
		{ .pid = 300, .ip = 0x50000 + 800, },
	},
	{
		/* perf [libc] free() */
		{ .pid = 200, .ip = 0x50000 + 800, },
		/* bash [libc] malloc() */
		{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
		/* bash [bash] xfree() */
		{ .pid = 300, .ip = 0x40000 + 900, },
		/* bash [libc] realloc() */
		{ .pid = 300, .ip = 0x50000 + 900, },
		/* bash [kernel] page_fault() */
		{ .pid = 300, .ip = 0xf0000 + 800, },
	},
};

static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .cpu = 0, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However the second evsel's "bash [libc] malloc" sample duplicates
	 * a common one and will be collapsed, so its tree ends up with
	 * 9 entries in total.
	 */
	list_for_each_entry(evsel, &evlist->entries, node) {
		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.ip = {
					.header = {
						.misc = PERF_RECORD_MISC_USER,
					},
					.pid = fake_common_samples[k].pid,
					.ip = fake_common_samples[k].ip,
				},
			};

			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample, 0) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
			if (he == NULL)
				goto out;

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.ip = {
					.header = {
						.misc = PERF_RECORD_MISC_USER,
					},
					.pid = fake_samples[i][k].pid,
					.ip = fake_samples[i][k].ip,
				},
			};

			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample, 0) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
			if (he == NULL)
				goto out;

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}
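/*
 * Return 1 if the (thread, map, symbol) triple of a hist entry matches one
 * of the resolved fake samples recorded by add_hist_entries().
 */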
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair. However every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

static void print_hists(struct hists *hists)
{
	int i = 0;
	struct rb_root *root;
	struct rb_node *node;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	pr_info("----- %s --------\n", __func__);
	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
			i, he->thread->comm, he->ms.map->dso->short_name,
			he->ms.sym->name, he->stat.period);

		i++;
		node = rb_next(node);
	}
}
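/*
 * Test scenario: two fake evsels ("cpu-clock" and "task-clock") each get the
 * same 5 common samples plus 5 evsel-specific ones.  After collapsing,
 * hists__match() should pair exactly the common entries, and hists__link()
 * should add a dummy entry to the leader for every entry that exists only in
 * the other hists.
 */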
int test__hists_link(void)
{
	int err = -1;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock");
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	list_for_each_entry(evsel, &evlist->entries, node) {
		hists__collapse_resort(&evsel->hists);

		if (verbose > 2)
			print_hists(&evsel->hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	/* match common entries */
	hists__match(&first->hists, &evsel->hists);
	err = validate_match(&first->hists, &evsel->hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(&first->hists, &evsel->hists);
	err = validate_link(&first->hists, &evsel->hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}