xref: /openbmc/linux/tools/perf/tests/hists_link.c (revision c0e297dc)
#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"

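/*
 * Each fake sample is identified by pid + ip; the thread/map/sym fields
 * are filled in by add_hist_entries() once the sample has been resolved
 * on the fake machine, so the validation helpers below can compare them
 * against the resulting hist entries.
 */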
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

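/*
 * Per-evsel samples: fake_samples[i] is only fed to the i-th evsel.
 * Note that "bash [libc] malloc" in the second set duplicates one of
 * the common samples above, so it collapses into an existing entry.
 */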
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

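/*
 * Feed the common samples plus one set of per-evsel samples into each
 * evsel's hists.  Every sample is first resolved to an addr_location on
 * the fake machine, and the resolved thread/map/sym are recorded in the
 * sample tables for the validation helpers below.
 */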
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel gets 10 samples - 5 common and 5 distinct.  However
	 * the second evsel's "bash [libc] malloc" sample collapses into an
	 * existing entry, so its tree ends up with only 9 entries.
	 */
	evlist__for_each(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

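/* Return 1 if any of the given samples resolved to this thread/map/sym. */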
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

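/*
 * After hists__match(), walk one hists tree and check that exactly the
 * entries corresponding to fake_common_samples have been paired.
 */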
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
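	/*
	 * If the sort keys require collapsing, hists__collapse_resort() has
	 * already moved the entries into entries_collapsed; otherwise they
	 * are still in the entries_in tree.
	 */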
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

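/*
 * After hists__link(), walk one hists tree and check the pair/dummy entry
 * counts.  idx selects which fake_samples[] set belongs to this hists
 * (0 = leader, 1 = other).
 */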
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * The leader hists (idx = 0) will have dummy entries linked in from
	 * the other hists, and some of its entries will have no pair.
	 * However, every entry in the other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

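/*
 * The test entry point (normally run via 'perf test'): create two evsels,
 * build the fake machine, feed both evsels the same common samples plus a
 * distinct set each, collapse the hists, then run hists__match() and
 * hists__link() and validate the result.
 */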
int test__hists_link(void)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}