xref: /openbmc/linux/tools/perf/tests/hists_link.c (revision 6355592e)
// SPDX-License-Identifier: GPL-2.0
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "parse-events.h"
#include "hists_common.h"
#include <errno.h>
#include <linux/kernel.h>

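/*
 * A fake sample: .pid and .ip are the input; .thread, .map and .sym are
 * filled in from the resolved addr_location so that the validation helpers
 * below can compare them against the resulting hist entries.
 */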
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

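/*
 * Samples specific to each evsel: fake_samples[0] for the first (leader)
 * evsel and fake_samples[1] for the second one.
 */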
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

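/*
 * Feed the fake samples into each evsel's hists: resolve pid/ip on the fake
 * machine, add a hist entry for it, and remember the resolved thread, map
 * and symbol so the validation helpers can look the entry up again.
 */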
static int add_hist_entries(struct evlist *evlist, struct machine *machine)
{
	struct evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so a total of 9 entries will be in the tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

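/*
 * Return 1 if the given (thread, map, symbol) triple is found in the
 * samples array, 0 otherwise.
 */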
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

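/*
 * After hists__match(), every paired entry must come from
 * fake_common_samples, and all of those samples must have been paired.
 */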
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

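/*
 * After hists__link(), count total, paired and dummy entries and check them
 * against the expected numbers for the leader (idx = 0) and the other
 * (idx = 1) hists.
 */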
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root_cached *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other
	 * hists, and some entries will have no pair.  However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first_cached(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

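/*
 * Create two evsels backed by fake samples on a fake machine, then verify
 * that hists__match() pairs only the common entries and that hists__link()
 * adds dummy entries to the leader for entries that only exist in the
 * other hists.
 */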
int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct evsel *evsel, *first;
	struct evlist *evlist = evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}