xref: /openbmc/linux/tools/perf/tests/hists_link.c (revision 2fa5ebe3)
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "parse-events.h"
#include "hists_common.h"
#include "util/mmap.h"
#include <errno.h>
#include <linux/kernel.h>

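/*
 * A fake sample: pid and ip are the inputs fed to machine__resolve(),
 * while thread/map/sym are filled in after resolution so that the
 * validation helpers below can find the corresponding hist entries.
 */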
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

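/*
 * Samples that are unique to one evsel: fake_samples[0] is added only to
 * the first (leader) evsel and fake_samples[1] only to the second one.
 */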
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

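/*
 * Feed the fake samples into each evsel's hists: every sample is resolved
 * against the fake machine and added as a hist entry, and the resolved
 * thread/map/sym are recorded for the later lookups in find_sample().
 */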
static int add_hist_entries(struct evlist *evlist, struct machine *machine)
{
	struct evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so only 9 entries will be in its tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

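/* Return 1 if a sample with the given thread/map/sym triple exists. */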
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

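/*
 * After hists__match(), walk the (collapsed) tree and check that exactly
 * the entries coming from fake_common_samples got a pair.
 */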
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

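/* Both the leader and the other hists must pass the match check. */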
static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

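/*
 * After hists__link(), count the total, paired and dummy entries and
 * check them against the expectations for the leader (idx = 0) and the
 * other (idx = 1) hists.
 */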
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * The leader hists (idx = 0) will have dummy entries from the
	 * other hists, and some of its entries will have no pair.
	 * However, every entry in the other hists should have a
	 * (possibly dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that one entry was collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

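/* The leader gets the dummy entries; the other hists must be fully paired. */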
static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

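/*
 * Set up two evsels (cpu-clock and task-clock) on a fake machine, add the
 * fake samples to both, then match and link their hists and validate the
 * resulting pairs.
 */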
static int test__hists_link(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct evsel *evsel, *first;
	struct evlist *evlist = evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_event(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_event(evlist, "task-clock");
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* set up threads/dso/map/symbols too */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = evlist__first(evlist);
	evsel = evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}

DEFINE_SUITE("Match and link multiple hists", hists_link);