#include <stdbool.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

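/*
 * samples_same - compare two parsed samples member by member.
 *
 * Only the members selected by @type (PERF_SAMPLE_* bits) and @read_format
 * (PERF_FORMAT_* bits) are compared, i.e. exactly those the parser is
 * expected to have filled in.
 */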
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	return true;
}

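/*
 * do_test - round-trip a single sample.
 *
 * Build a known struct perf_sample, synthesize a PERF_RECORD_SAMPLE event
 * from it, parse that event back and check that nothing was lost or
 * corrupted.  The event buffer is poisoned with 0xff first so the number of
 * bytes actually written can also be checked against
 * perf_event__sample_event_size().
 */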
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

	/* Fill with a pattern that can never contain the 0xff poison byte */
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample, false);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(int subtest __maybe_unused)
{
	/*
	 * All combinations of PERF_FORMAT_TOTAL_TIME_ENABLED,
	 * PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_GROUP, each on top of
	 * PERF_FORMAT_ID, which is forced for PERF_SAMPLE_READ.
	 */
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_REGS_INTR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}