// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/sort.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/parse-events.h"
#include "util/thread.h"
#include "tests/tests.h"
#include "tests/hists_common.h"
#include <linux/kernel.h>

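/*
 * One synthetic sample: the pid/ip pair fed into the hists code, plus the
 * resolved thread/map/symbol that add_hist_entries() caches so that
 * put_fake_samples() can drop the references when the test is done.
 */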
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
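/*
 * Ten fake samples in total; add_hist_entries() gives each a period of
 * 1000, so every sample accounts for 10% in the expected outputs below.
 */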

/*
 * Will be cast to struct ip_callchain which has all 64-bit entries
 * of nr and ips[].
 */
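/* e.g. { 3, A, B, C } is read as .nr = 3, .ips = { A, B, C } */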
static u64 fake_callchains[][10] = {
	/*   schedule => run_command => main */
	{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   main  */
	{ 1, FAKE_IP_PERF_MAIN, },
	/*   cmd_record => run_command => main */
	{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   malloc => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/*   free => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/*   main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/*   page_fault => sys_perf_event_open => run_command => main */
	{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
	     FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/*   main */
	{ 1, FAKE_IP_BASH_MAIN, },
	/*   xmalloc => malloc => xmalloc => malloc => xmalloc => main */
	{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
	     FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
	/*   page_fault => malloc => main */
	{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};

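/*
 * Feed each fake sample (with its callchain) into 'hists' via the hist
 * entry iterator.  The cumulative iterator is used when
 * symbol_conf.cumulate_callchain is set, the normal one otherwise.
 */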
static int add_hist_entries(struct hists *hists, struct machine *machine)
{
	struct addr_location al;
	struct evsel *evsel = hists_to_evsel(hists);
	struct perf_sample sample = { .period = 1000, };
	size_t i;

	addr_location__init(&al);
	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = &sample,
			.hide_unresolved = false,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		sample.cpumode = PERF_RECORD_MISC_USER;
		sample.pid = fake_samples[i].pid;
		sample.tid = fake_samples[i].pid;
		sample.ip = fake_samples[i].ip;
		sample.callchain = (struct ip_callchain *)fake_callchains[i];

		if (machine__resolve(machine, &al, &sample) < 0)
			goto out;

		if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
					 NULL) < 0) {
			goto out;
		}

		thread__put(fake_samples[i].thread);
		fake_samples[i].thread = thread__get(al.thread);
		map__put(fake_samples[i].map);
		fake_samples[i].map = map__get(al.map);
		fake_samples[i].sym = al.sym;
	}

	addr_location__exit(&al);
	return TEST_OK;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	addr_location__exit(&al);
	return TEST_FAIL;
}

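/*
 * Remove every hist entry from both the output tree and the input
 * (or collapsed) tree, then free it.
 */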
static void del_hist_entries(struct hists *hists)
{
	struct hist_entry *he;
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;
	struct rb_node *node;

	if (hists__has(hists, need_collapse))
		root_in = &hists->entries_collapsed;
	else
		root_in = hists->entries_in;

	root_out = &hists->entries;

	while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
		node = rb_first_cached(root_out);

		he = rb_entry(node, struct hist_entry, rb_node);
		rb_erase_cached(node, root_out);
		rb_erase_cached(&he->rb_node_in, root_in);
		hist_entry__delete(he);
	}
}

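/* Drop the map/thread references cached by add_hist_entries(). */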
static void put_fake_samples(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
		map__zput(fake_samples[i].map);
		thread__zput(fake_samples[i].thread);
	}
}

typedef int (*test_fn_t)(struct evsel *, struct machine *);

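/* Accessors for the hist entry / callchain fields compared in do_test() */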
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (map__dso(he->ms.map)->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define DEPTH(he) (he->callchain->max_depth)
#define CDSO(cl)  (map__dso(cl->ms.map)->short_name)
#define CSYM(cl)  (cl->ms.sym->name)

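/*
 * Expected values for one hist entry: cumulated (children) period,
 * self period, and the comm/dso/symbol strings.
 */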
struct result {
	u64 children;
	u64 self;
	const char *comm;
	const char *dso;
	const char *sym;
};

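/*
 * Expected callchain of one hist entry: number of nodes and the
 * dso/symbol of each node, starting from the sampled function.
 */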
struct callchain_result {
	u64 nr;
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};

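/*
 * Resort the hists, walk the sorted entries and compare each one against
 * expected[].  When callchains are enabled, also walk the first callchain
 * node of every entry and compare it against expected_callchain[].
 */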
static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/*
	 * adding and deleting hist entries must be done outside of this
	 * function since TEST_ASSERT_VAL() returns in case of failure.
	 */
	hists__collapse_resort(hists, NULL);
	evsel__output_resort(hists_to_evsel(hists), NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries.rb_root;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/* check callchain entries */
		root = &he->callchain->node.rb_root;

		TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple child nodes properly */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}

/* NO callchain + NO children */
static int test1(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%     bash  bash           [.] main
	 *   10.00%     bash  bash           [.] xmalloc
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%     perf  [kernel]       [k] schedule
	 *   10.00%     perf  libc           [.] free
	 *   10.00%     perf  libc           [.] malloc
	 *   10.00%     perf  perf           [.] cmd_record
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + NO children */
static int test2(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Overhead  Command  Shared Object          Symbol
	 * ========  =======  =============  ==============
	 *   20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, {	{ "perf",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "page_fault" },
				{ "libc",     "malloc" },
				{ "bash",     "main" }, },
		},
		{
			1, {	{ "bash",     "main" }, },
		},
		{
			6, {	{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "bash",     "main" }, },
		},
		{
			4, {	{ "[kernel]", "page_fault" },
				{ "[kernel]", "sys_perf_event_open" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "schedule" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "free" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "malloc" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			3, {	{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* NO callchain + children */
static int test3(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *   30.00%    10.00%     bash  bash           [.] main
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *   10.00%    10.00%     perf  libc           [.] free
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 */
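	/*
	 * With cumulation, a symbol's "children" period is the sample period
	 * (1000) summed over every sample whose callchain contains it, counted
	 * once per sample: e.g. main appears in all 7 perf callchains, hence
	 * 7000 below, while its self period comes from the 2 samples where it
	 * is the leaf.
	 */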
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;
	evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

/* callchain + children */
static int test4(struct evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output:
	 *
	 * Children      Self  Command  Shared Object                   Symbol
	 * ========  ========  =======  =============  =======================
	 *   70.00%    20.00%     perf  perf           [.] main
	 *              |
	 *              --- main
	 *
	 *   50.00%     0.00%     perf  perf           [.] run_command
	 *              |
	 *              --- run_command
	 *                  main
	 *
	 *   30.00%    10.00%     bash  bash           [.] main
	 *              |
	 *              --- main
	 *
	 *   30.00%    10.00%     perf  perf           [.] cmd_record
	 *              |
	 *              --- cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   20.00%     0.00%     bash  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                 |
	 *                 |--50.00%-- xmalloc
	 *                 |           main
	 *                  --50.00%-- main
	 *
	 *   10.00%    10.00%     bash  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  malloc
	 *                  main
	 *
	 *   10.00%    10.00%     bash  bash           [.] xmalloc
	 *              |
	 *              --- xmalloc
	 *                  malloc
	 *                  xmalloc     <--- NOTE: there's a cycle
	 *                  malloc
	 *                  xmalloc
	 *                  main
	 *
	 *   10.00%     0.00%     perf  [kernel]       [k] sys_perf_event_open
	 *              |
	 *              --- sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] page_fault
	 *              |
	 *              --- page_fault
	 *                  sys_perf_event_open
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  [kernel]       [k] schedule
	 *              |
	 *              --- schedule
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] free
	 *              |
	 *              --- free
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 *   10.00%    10.00%     perf  libc           [.] malloc
	 *              |
	 *              --- malloc
	 *                  cmd_record
	 *                  run_command
	 *                  main
	 *
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, {	{ "perf",     "main" }, },
		},
		{
			2, {	{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			1, {	{ "bash",     "main" }, },
		},
		{
			3, {	{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "bash",     "main" },
				{ "bash",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "page_fault" },
				{ "libc",     "malloc" },
				{ "bash",     "main" }, },
		},
		{
			6, {	{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "libc",     "malloc" },
				{ "bash",     "xmalloc" },
				{ "bash",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "sys_perf_event_open" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "[kernel]", "page_fault" },
				{ "[kernel]", "sys_perf_event_open" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			3, {	{ "[kernel]", "schedule" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "free" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
		{
			4, {	{ "libc",     "malloc" },
				{ "perf",     "cmd_record" },
				{ "perf",     "run_command" },
				{ "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}

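/*
 * Create an evlist with a single cpu-clock event, set up the fake machine
 * (threads, DSOs, maps and symbols), then run the four callchain/children
 * combinations above.
 */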
static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct evsel *evsel;
	struct evlist *evlist = evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_event(evlist, "cpu-clock");
	if (err)
		goto out;
	err = TEST_FAIL;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything */
	evlist__delete(evlist);
	machines__exit(&machines);
	put_fake_samples();

	return err;
}

DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);