// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"
#include "util/trace-event.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

/*
 * Hardcode the expected values for branch_entry flags.
 * These are based on the input value (213) specified
 * in the branch_stack variable.
 */
#define BS_EXPECTED_BE	0xa00d000000000000
#define BS_EXPECTED_LE	0xd5000000
#define FLAG(s)	s->branch_stack->entries[i].flags

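/*
 * Compare two parsed samples field by field.  Only the fields selected by
 * the PERF_SAMPLE_* bits in @type (and, for PERF_SAMPLE_READ, by
 * @read_format) are compared.  When @needs_swap is set, the branch-stack
 * check only verifies the byte-swapped branch flags (see BS_EXPECTED_*).
 */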
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format, bool needs_swap)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

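	/*
	 * When the event was parsed with byte-swapping forced, only the
	 * first entry's flags are checked, against the hardcoded
	 * BS_EXPECTED_BE/LE values derived from the input value 213.
	 */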
	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		COMP(branch_stack->hw_idx);
		for (i = 0; i < s1->branch_stack->nr; i++) {
			if (needs_swap)
				return ((tep_is_bigendian()) ?
					(FLAG(s2).value == BS_EXPECTED_BE) :
					(FLAG(s2).value == BS_EXPECTED_LE));
			else
				MCOMP(branch_stack->entries[i]);
		}
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	if (type & PERF_SAMPLE_CGROUP)
		COMP(cgroup);

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
		COMP(data_page_size);

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
		COMP(code_page_size);

	if (type & PERF_SAMPLE_AUX) {
		COMP(aux_sample.size);
		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
			   s1->aux_sample.size)) {
			pr_debug("Samples differ at 'aux_sample'\n");
			return false;
		}
	}

	return true;
}

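/*
 * Build a reference perf_sample, synthesize it into a raw PERF_RECORD_SAMPLE
 * event, parse that event back and verify the round trip reproduces the
 * original sample for the given sample_type/read_format combination.
 */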
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, -1ULL, 211, 212, 213},
	};
	u64 regs[64];
	const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.no_hw_idx      = false,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
		.cgroup		= 114,
		.data_page_size = 115,
		.code_page_size = 116,
		.aux_sample	= {
			.size	= sizeof(aux_data),
			.data	= (void *)aux_data,
		},
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out, sample_out_endian;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;

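	/*
	 * Fill the register buffer with a byte pattern that never contains
	 * 0xff, so it cannot be confused with the 0xff padding used below
	 * for overrun checking.
	 */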
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __evsel__sample_size(sample_type);

	err = evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format, evsel.needs_swap)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

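	/*
	 * For the branch-stack-only case, parse the same event again with
	 * byte-swapping forced, so the branch flags swap path is exercised.
	 */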
	if (sample_type == PERF_SAMPLE_BRANCH_STACK) {
		evsel.needs_swap = true;
		evsel.sample_size = __evsel__sample_size(sample_type);
		err = evsel__parse_sample(&evsel, event, &sample_out_endian);
		if (err) {
			pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
				 "evsel__parse_sample", sample_type, err);
			goto out_free;
		}

		if (!samples_same(&sample, &sample_out_endian, sample_type, read_format, evsel.needs_swap)) {
			pr_debug("parsing failed for sample_type %#"PRIx64"\n",
				 sample_type);
			goto out_free;
		}
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
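	/*
	 * read_format values to exercise: all include PERF_FORMAT_ID (bit 2);
	 * 4-7 vary the time_enabled/time_running bits, and 12-15 additionally
	 * set PERF_FORMAT_GROUP (bit 3).
	 */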
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_WEIGHT_STRUCT << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/*
	 * Test all sample format bits together
	 * Note: PERF_SAMPLE_WEIGHT and PERF_SAMPLE_WEIGHT_STRUCT cannot
	 *       be set simultaneously.
	 */
	sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}

DEFINE_SUITE("Sample parsing", sample_parsing);