/* xref: /openbmc/linux/tools/perf/tests/hists_link.c (revision 4949009e) */
#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"

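/*
 * A fake sample fed into the hists.  Only pid and ip are set up front;
 * thread, map and sym are filled in once the sample has been resolved,
 * so the validators below can match hist entries back to their
 * originating sample.
 */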
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH,  .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

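/*
 * Feed the fake samples into the hists of each evsel: all of
 * fake_common_samples plus one row of fake_samples per evsel.  The
 * resolved thread/map/symbol of each sample is recorded back into the
 * arrays so the validators below can look the entries up again.
 */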
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so only 9 entries end up in its tree.
	 */
	evlist__for_each(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL)
				goto out;

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, 1, 1, 0, true);
			if (he == NULL)
				goto out;

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

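/* Return 1 if any of the given samples resolved to this thread/map/symbol. */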
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

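/*
 * Check one side of the hists__match() result: every paired entry must
 * come from fake_common_samples, and all of the common samples must
 * have been paired.
 */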
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zu of %zu\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

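/*
 * Check one side of the hists__link() result.  For the leader (idx = 0),
 * count the dummy entries added for the other hists' unmatched samples;
 * for the other hists (idx = 1), insist that every entry has a pair and
 * that no dummy entries were added.
 */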
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * The leader hists (idx = 0) will have dummy entries from the other
	 * hists, and some of its entries will have no pair.  However, every
	 * entry in the other hists should have a (possibly dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zu of %zu\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zu of %zu\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zu of %zu\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zu\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

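/*
 * The test proper: create two evsels, fill both with the common samples
 * plus one distinct set each, then verify that hists__match() pairs up
 * exactly the common entries and that hists__link() adds the expected
 * dummy entries to the leader.
 */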
int test__hists_link(void)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock");
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}