// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "branch.h"
#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"

#include "tests.h"

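/*
 * COMP() compares a scalar member of the two samples being checked, MCOMP()
 * byte-compares an aggregate member.  Both log the differing member name and
 * fail the comparison.
 */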
#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

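/*
 * Return true if @s1 and @s2 hold the same values for every field selected
 * by @type (PERF_SAMPLE_* bits) and @read_format (PERF_FORMAT_* bits).
 */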
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

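/*
 * Build a fully populated struct perf_sample, synthesize it into a raw
 * PERF_RECORD_SAMPLE event, parse that event back and check that the parsed
 * sample matches the original.  Returns 0 on success, -1 on failure.
 */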
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

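	/*
	 * Fill the register array with a pattern that never produces an 0xff
	 * byte, so the buffer-overrun check further down stays valid.
	 */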
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
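	/*
	 * read_format values to exercise: every combination of the
	 * time_enabled, time_running and group flags, always together with
	 * PERF_FORMAT_ID, which is forced for PERF_SAMPLE_READ.
	 */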
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}