xref: /openbmc/linux/tools/perf/tests/hists_link.c (revision e2c75e76)
// SPDX-License-Identifier: GPL-2.0
#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"
#include <errno.h>
#include <linux/kernel.h>

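/* A fake sample (pid/ip) plus the thread/map/symbol it resolved to, kept for later lookup. */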
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

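/* Samples fed to every evsel; these are the entries hists__match() should pair up. */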
/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

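/* Per-evsel distinct samples: fake_samples[i] is fed only to the i-th evsel. */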
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

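/*
 * Add the common and per-evsel samples to each evsel's hists, resolving
 * them against the fake machine and recording the resolved thread/map/sym.
 */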
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will get 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so only 9 entries will be in its tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

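		/* then add this evsel's own set of distinct samples */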
		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

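/* Return 1 if the (thread, map, symbol) triple matches one of the recorded samples. */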
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

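/* After hists__match(): the paired entries must be exactly the common samples. */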
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

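/*
 * After hists__link(): check the pair and dummy-entry counts for the
 * leader (idx = 0) and the other (idx = 1) hists.
 */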
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * The leader hists (idx = 0) will have dummy entries from the other
	 * hists, and some of its entries will have no pair.  However, every
	 * entry in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

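/*
 * Add overlapping fake samples to two evsels, then verify that
 * hists__match() pairs the common entries and hists__link() adds
 * dummy entries for the rest.
 */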
int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

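	/* compare the first (leader) evsel's hists against the last evsel's */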
	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}