xref: /openbmc/linux/tools/perf/util/arm-spe.c (revision b694e3c604e999343258c49e574abd7be012e726)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Arm Statistical Profiling Extensions (SPE) support
4  * Copyright (c) 2017-2018, Arm Ltd.
5  */
6 
7 #include <byteswap.h>
8 #include <endian.h>
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 
19 #include "auxtrace.h"
20 #include "color.h"
21 #include "debug.h"
22 #include "evlist.h"
23 #include "evsel.h"
24 #include "machine.h"
25 #include "session.h"
26 #include "symbol.h"
27 #include "thread.h"
28 #include "thread-stack.h"
29 #include "tsc.h"
30 #include "tool.h"
31 #include "util/synthetic-events.h"
32 
33 #include "arm-spe.h"
34 #include "arm-spe-decoder/arm-spe-decoder.h"
35 #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
36 
37 #include "../../arch/arm64/include/asm/cputype.h"
38 #define MAX_TIMESTAMP (~0ULL)
39 
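/* True if the operation packet describes a load/store (LDST) operation */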
40 #define is_ldst_op(op)		(!!((op) & ARM_SPE_OP_LDST))
41 
42 struct arm_spe {
43 	struct auxtrace			auxtrace;
44 	struct auxtrace_queues		queues;
45 	struct auxtrace_heap		heap;
46 	struct itrace_synth_opts        synth_opts;
47 	u32				auxtrace_type;
48 	struct perf_session		*session;
49 	struct machine			*machine;
50 	u32				pmu_type;
51 	u64				midr;
52 
53 	struct perf_tsc_conversion	tc;
54 
55 	u8				timeless_decoding;
56 	u8				data_queued;
57 
58 	u64				sample_type;
59 	u8				sample_flc;
60 	u8				sample_llc;
61 	u8				sample_tlb;
62 	u8				sample_branch;
63 	u8				sample_remote_access;
64 	u8				sample_memory;
65 	u8				sample_instructions;
66 	u64				instructions_sample_period;
67 
68 	u64				l1d_miss_id;
69 	u64				l1d_access_id;
70 	u64				llc_miss_id;
71 	u64				llc_access_id;
72 	u64				tlb_miss_id;
73 	u64				tlb_access_id;
74 	u64				branch_miss_id;
75 	u64				remote_access_id;
76 	u64				memory_id;
77 	u64				instructions_id;
78 
79 	u64				kernel_start;
80 
81 	unsigned long			num_events;
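	/* Set when SPE CONTEXT packets (rather than context-switch events) supply the pid/tid */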
82 	u8				use_ctx_pkt_for_pid;
83 };
84 
85 struct arm_spe_queue {
86 	struct arm_spe			*spe;
87 	unsigned int			queue_nr;
88 	struct auxtrace_buffer		*buffer;
89 	struct auxtrace_buffer		*old_buffer;
90 	union perf_event		*event_buf;
91 	bool				on_heap;
92 	bool				done;
93 	pid_t				pid;
94 	pid_t				tid;
95 	int				cpu;
96 	struct arm_spe_decoder		*decoder;
97 	u64				time;
98 	u64				timestamp;
99 	struct thread			*thread;
100 	u64				period_instructions;
101 };
102 
103 static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
104 			 unsigned char *buf, size_t len)
105 {
106 	struct arm_spe_pkt packet;
107 	size_t pos = 0;
108 	int ret, pkt_len, i;
109 	char desc[ARM_SPE_PKT_DESC_MAX];
110 	const char *color = PERF_COLOR_BLUE;
111 
112 	color_fprintf(stdout, color,
113 		      ". ... ARM SPE data: size %#zx bytes\n",
114 		      len);
115 
116 	while (len) {
117 		ret = arm_spe_get_packet(buf, len, &packet);
118 		if (ret > 0)
119 			pkt_len = ret;
120 		else
121 			pkt_len = 1;
122 		printf(".");
123 		color_fprintf(stdout, color, "  %08x: ", pos);
124 		for (i = 0; i < pkt_len; i++)
125 			color_fprintf(stdout, color, " %02x", buf[i]);
126 		for (; i < 16; i++)
127 			color_fprintf(stdout, color, "   ");
128 		if (ret > 0) {
129 			ret = arm_spe_pkt_desc(&packet, desc,
130 					       ARM_SPE_PKT_DESC_MAX);
131 			if (!ret)
132 				color_fprintf(stdout, color, " %s\n", desc);
133 		} else {
134 			color_fprintf(stdout, color, " Bad packet!\n");
135 		}
136 		pos += pkt_len;
137 		buf += pkt_len;
138 		len -= pkt_len;
139 	}
140 }
141 
142 static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
143 			       size_t len)
144 {
145 	printf(".\n");
146 	arm_spe_dump(spe, buf, len);
147 }
148 
149 static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
150 {
151 	struct arm_spe_queue *speq = data;
152 	struct auxtrace_buffer *buffer = speq->buffer;
153 	struct auxtrace_buffer *old_buffer = speq->old_buffer;
154 	struct auxtrace_queue *queue;
155 
156 	queue = &speq->spe->queues.queue_array[speq->queue_nr];
157 
158 	buffer = auxtrace_buffer__next(queue, buffer);
159 	/* If no more data, drop the previous auxtrace_buffer and return */
160 	if (!buffer) {
161 		if (old_buffer)
162 			auxtrace_buffer__drop_data(old_buffer);
163 		b->len = 0;
164 		return 0;
165 	}
166 
167 	speq->buffer = buffer;
168 
169 	/* If the aux_buffer doesn't have data associated, try to load it */
170 	if (!buffer->data) {
171 		/* get the file desc associated with the perf data file */
172 		int fd = perf_data__fd(speq->spe->session->data);
173 
174 		buffer->data = auxtrace_buffer__get_data(buffer, fd);
175 		if (!buffer->data)
176 			return -ENOMEM;
177 	}
178 
179 	b->len = buffer->size;
180 	b->buf = buffer->data;
181 
182 	if (b->len) {
183 		if (old_buffer)
184 			auxtrace_buffer__drop_data(old_buffer);
185 		speq->old_buffer = buffer;
186 	} else {
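		/* Empty buffer: drop it and recurse to fetch the next buffer in the queue */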
187 		auxtrace_buffer__drop_data(buffer);
188 		return arm_spe_get_trace(b, data);
189 	}
190 
191 	return 0;
192 }
193 
194 static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
195 		unsigned int queue_nr)
196 {
197 	struct arm_spe_params params = { .get_trace = 0, };
198 	struct arm_spe_queue *speq;
199 
200 	speq = zalloc(sizeof(*speq));
201 	if (!speq)
202 		return NULL;
203 
204 	speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
205 	if (!speq->event_buf)
206 		goto out_free;
207 
208 	speq->spe = spe;
209 	speq->queue_nr = queue_nr;
210 	speq->pid = -1;
211 	speq->tid = -1;
212 	speq->cpu = -1;
213 	speq->period_instructions = 0;
214 
215 	/* params set */
216 	params.get_trace = arm_spe_get_trace;
217 	params.data = speq;
218 
219 	/* create new decoder */
220 	speq->decoder = arm_spe_decoder_new(&params);
221 	if (!speq->decoder)
222 		goto out_free;
223 
224 	return speq;
225 
226 out_free:
227 	zfree(&speq->event_buf);
228 	free(speq);
229 
230 	return NULL;
231 }
232 
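/* Classify an address as kernel or user space by comparing it against the kernel start address */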
233 static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
234 {
235 	return ip >= spe->kernel_start ?
236 		PERF_RECORD_MISC_KERNEL :
237 		PERF_RECORD_MISC_USER;
238 }
239 
240 static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
241 				    struct auxtrace_queue *queue)
242 {
243 	struct arm_spe_queue *speq = queue->priv;
244 	pid_t tid;
245 
246 	tid = machine__get_current_tid(spe->machine, speq->cpu);
247 	if (tid != -1) {
248 		speq->tid = tid;
249 		thread__zput(speq->thread);
250 	} else
251 		speq->tid = queue->tid;
252 
253 	if ((!speq->thread) && (speq->tid != -1)) {
254 		speq->thread = machine__find_thread(spe->machine, -1,
255 						    speq->tid);
256 	}
257 
258 	if (speq->thread) {
259 		speq->pid = thread__pid(speq->thread);
260 		if (queue->cpu == -1)
261 			speq->cpu = thread__cpu(speq->thread);
262 	}
263 }
264 
265 static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
266 {
267 	struct arm_spe *spe = speq->spe;
268 	int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
269 
270 	if (err)
271 		return err;
272 
273 	arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
274 
275 	return 0;
276 }
277 
278 static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *record)
279 {
280 	struct simd_flags simd_flags = {};
281 
282 	if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
283 		simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
284 
285 	if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
286 		simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
287 
288 	if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
289 		simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
290 
291 	if (record->type & ARM_SPE_SVE_EMPTY_PRED)
292 		simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
293 
294 	return simd_flags;
295 }
296 
297 static void arm_spe_prep_sample(struct arm_spe *spe,
298 				struct arm_spe_queue *speq,
299 				union perf_event *event,
300 				struct perf_sample *sample)
301 {
302 	struct arm_spe_record *record = &speq->decoder->record;
303 
304 	if (!spe->timeless_decoding)
305 		sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
306 
307 	sample->ip = record->from_ip;
308 	sample->cpumode = arm_spe_cpumode(spe, sample->ip);
309 	sample->pid = speq->pid;
310 	sample->tid = speq->tid;
311 	sample->period = 1;
312 	sample->cpu = speq->cpu;
313 	sample->simd_flags = arm_spe__synth_simd_flags(record);
314 
315 	event->sample.header.type = PERF_RECORD_SAMPLE;
316 	event->sample.header.misc = sample->cpumode;
317 	event->sample.header.size = sizeof(struct perf_event_header);
318 }
319 
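/* When injecting, serialize the synthesized sample fields back into the event itself */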
320 static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
321 {
322 	event->header.size = perf_event__sample_event_size(sample, type, 0);
323 	return perf_event__synthesize_sample(event, type, 0, sample);
324 }
325 
326 static inline int
327 arm_spe_deliver_synth_event(struct arm_spe *spe,
328 			    struct arm_spe_queue *speq __maybe_unused,
329 			    union perf_event *event,
330 			    struct perf_sample *sample)
331 {
332 	int ret;
333 
334 	if (spe->synth_opts.inject) {
335 		ret = arm_spe__inject_event(event, sample, spe->sample_type);
336 		if (ret)
337 			return ret;
338 	}
339 
340 	ret = perf_session__deliver_synth_event(spe->session, event, sample);
341 	if (ret)
342 		pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
343 
344 	return ret;
345 }
346 
347 static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
348 				     u64 spe_events_id, u64 data_src)
349 {
350 	struct arm_spe *spe = speq->spe;
351 	struct arm_spe_record *record = &speq->decoder->record;
352 	union perf_event *event = speq->event_buf;
353 	struct perf_sample sample = { .ip = 0, };
354 
355 	arm_spe_prep_sample(spe, speq, event, &sample);
356 
357 	sample.id = spe_events_id;
358 	sample.stream_id = spe_events_id;
359 	sample.addr = record->virt_addr;
360 	sample.phys_addr = record->phys_addr;
361 	sample.data_src = data_src;
362 	sample.weight = record->latency;
363 
364 	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
365 }
366 
367 static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
368 					u64 spe_events_id)
369 {
370 	struct arm_spe *spe = speq->spe;
371 	struct arm_spe_record *record = &speq->decoder->record;
372 	union perf_event *event = speq->event_buf;
373 	struct perf_sample sample = { .ip = 0, };
374 
375 	arm_spe_prep_sample(spe, speq, event, &sample);
376 
377 	sample.id = spe_events_id;
378 	sample.stream_id = spe_events_id;
379 	sample.addr = record->to_ip;
380 	sample.weight = record->latency;
381 
382 	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
383 }
384 
385 static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
386 					     u64 spe_events_id, u64 data_src)
387 {
388 	struct arm_spe *spe = speq->spe;
389 	struct arm_spe_record *record = &speq->decoder->record;
390 	union perf_event *event = speq->event_buf;
391 	struct perf_sample sample = { .ip = 0, };
392 
393 	/*
394 	 * Handle the perf instruction sampling period: only synthesize one
395 	 * sample per instructions_sample_period decoded records.
	 */
396 	speq->period_instructions++;
397 	if (speq->period_instructions < spe->instructions_sample_period)
398 		return 0;
399 	speq->period_instructions = 0;
400 
401 	arm_spe_prep_sample(spe, speq, event, &sample);
402 
403 	sample.id = spe_events_id;
404 	sample.stream_id = spe_events_id;
405 	sample.addr = record->virt_addr;
406 	sample.phys_addr = record->phys_addr;
407 	sample.data_src = data_src;
408 	sample.period = spe->instructions_sample_period;
409 	sample.weight = record->latency;
410 
411 	return arm_spe_deliver_synth_event(spe, speq, event, &sample);
412 }
413 
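/* Cores whose SPE data-source packets use the Neoverse-specific encoding below */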
414 static const struct midr_range neoverse_spe[] = {
415 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
416 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
417 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
418 	{},
419 };
420 
421 static void arm_spe__synth_data_source_neoverse(const struct arm_spe_record *record,
422 						union perf_mem_data_src *data_src)
423 {
424 	/*
425 	 * Even though four levels of cache hierarchy are possible, no known
426 	 * production Neoverse systems currently include more than three levels
427 	 * so for the time being we assume three exist. If a production system
428 	 * is built with four, then this function would have to be changed to
429 	 * detect the number of levels for reporting.
430 	 */
431 
432 	/*
433 	 * We have no data on the hit level or data source for stores in the
434 	 * Neoverse SPE records.
435 	 */
436 	if (record->op & ARM_SPE_OP_ST) {
437 		data_src->mem_lvl = PERF_MEM_LVL_NA;
438 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
439 		data_src->mem_snoop = PERF_MEM_SNOOP_NA;
440 		return;
441 	}
442 
443 	switch (record->source) {
444 	case ARM_SPE_NV_L1D:
445 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
446 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
447 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
448 		break;
449 	case ARM_SPE_NV_L2:
450 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
451 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
452 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
453 		break;
454 	case ARM_SPE_NV_PEER_CORE:
455 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
456 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
457 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
458 		break;
459 	/*
460 	 * We don't know if this hit in L1 or L2, but we do know it was a
461 	 * cache-to-cache transfer, so set SNOOPX_PEER
462 	 */
463 	case ARM_SPE_NV_LOCAL_CLUSTER:
464 	case ARM_SPE_NV_PEER_CLUSTER:
465 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
466 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
467 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
468 		break;
469 	/*
470 	 * System cache is assumed to be L3
471 	 */
472 	case ARM_SPE_NV_SYS_CACHE:
473 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
474 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
475 		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
476 		break;
477 	/*
478 	 * We don't know what level it hit in, except it came from the other
479 	 * socket
480 	 */
481 	case ARM_SPE_NV_REMOTE:
482 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1;
483 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
484 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
485 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
486 		break;
487 	case ARM_SPE_NV_DRAM:
488 		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
489 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
490 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
491 		break;
492 	default:
493 		break;
494 	}
495 }
496 
497 static void arm_spe__synth_data_source_generic(const struct arm_spe_record *record,
498 					       union perf_mem_data_src *data_src)
499 {
500 	if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
501 		data_src->mem_lvl = PERF_MEM_LVL_L3;
502 
503 		if (record->type & ARM_SPE_LLC_MISS)
504 			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
505 		else
506 			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
507 	} else if (record->type & (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS)) {
508 		data_src->mem_lvl = PERF_MEM_LVL_L1;
509 
510 		if (record->type & ARM_SPE_L1D_MISS)
511 			data_src->mem_lvl |= PERF_MEM_LVL_MISS;
512 		else
513 			data_src->mem_lvl |= PERF_MEM_LVL_HIT;
514 	}
515 
516 	if (record->type & ARM_SPE_REMOTE_ACCESS)
517 		data_src->mem_lvl |= PERF_MEM_LVL_REM_CCE1;
518 }
519 
520 static u64 arm_spe__synth_data_source(const struct arm_spe_record *record, u64 midr)
521 {
522 	union perf_mem_data_src	data_src = { .mem_op = PERF_MEM_OP_NA };
523 	bool is_neoverse = is_midr_in_range_list(midr, neoverse_spe);
524 
525 	/* Only synthesize data source for LDST operations */
526 	if (!is_ldst_op(record->op))
527 		return 0;
528 
529 	if (record->op & ARM_SPE_OP_LD)
530 		data_src.mem_op = PERF_MEM_OP_LOAD;
531 	else if (record->op & ARM_SPE_OP_ST)
532 		data_src.mem_op = PERF_MEM_OP_STORE;
533 	else
534 		return 0;
535 
536 	if (is_neoverse)
537 		arm_spe__synth_data_source_neoverse(record, &data_src);
538 	else
539 		arm_spe__synth_data_source_generic(record, &data_src);
540 
541 	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
542 		data_src.mem_dtlb = PERF_MEM_TLB_WK;
543 
544 		if (record->type & ARM_SPE_TLB_MISS)
545 			data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
546 		else
547 			data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
548 	}
549 
550 	return data_src.val;
551 }
552 
553 static int arm_spe_sample(struct arm_spe_queue *speq)
554 {
555 	const struct arm_spe_record *record = &speq->decoder->record;
556 	struct arm_spe *spe = speq->spe;
557 	u64 data_src;
558 	int err;
559 
560 	data_src = arm_spe__synth_data_source(record, spe->midr);
561 
562 	if (spe->sample_flc) {
563 		if (record->type & ARM_SPE_L1D_MISS) {
564 			err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
565 							data_src);
566 			if (err)
567 				return err;
568 		}
569 
570 		if (record->type & ARM_SPE_L1D_ACCESS) {
571 			err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
572 							data_src);
573 			if (err)
574 				return err;
575 		}
576 	}
577 
578 	if (spe->sample_llc) {
579 		if (record->type & ARM_SPE_LLC_MISS) {
580 			err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
581 							data_src);
582 			if (err)
583 				return err;
584 		}
585 
586 		if (record->type & ARM_SPE_LLC_ACCESS) {
587 			err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
588 							data_src);
589 			if (err)
590 				return err;
591 		}
592 	}
593 
594 	if (spe->sample_tlb) {
595 		if (record->type & ARM_SPE_TLB_MISS) {
596 			err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
597 							data_src);
598 			if (err)
599 				return err;
600 		}
601 
602 		if (record->type & ARM_SPE_TLB_ACCESS) {
603 			err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
604 							data_src);
605 			if (err)
606 				return err;
607 		}
608 	}
609 
610 	if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
611 		err = arm_spe__synth_branch_sample(speq, spe->branch_miss_id);
612 		if (err)
613 			return err;
614 	}
615 
616 	if (spe->sample_remote_access &&
617 	    (record->type & ARM_SPE_REMOTE_ACCESS)) {
618 		err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
619 						data_src);
620 		if (err)
621 			return err;
622 	}
623 
624 	/*
625 	 * A record that is not a load/store operation is not a memory access,
626 	 * so skip synthesizing a memory sample in that case.
627 	 */
628 	if (spe->sample_memory && is_ldst_op(record->op)) {
629 		err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
630 		if (err)
631 			return err;
632 	}
633 
634 	if (spe->sample_instructions) {
635 		err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
636 		if (err)
637 			return err;
638 	}
639 
640 	return 0;
641 }
642 
643 static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
644 {
645 	struct arm_spe *spe = speq->spe;
646 	struct arm_spe_record *record;
647 	int ret;
648 
649 	if (!spe->kernel_start)
650 		spe->kernel_start = machine__kernel_start(spe->machine);
651 
652 	while (1) {
653 		/*
654 		 * The usual logic is first to decode the packets and then, based
655 		 * on the record, synthesize a sample; but here the flow is
656 		 * reversed: it calls arm_spe_sample() to synthesize samples
657 		 * prior to arm_spe_decode().
658 		 *
659 		 * There are two reasons for this:
660 		 * 1. When the queue is set up in arm_spe__setup_queue(), trace
661 		 * data has already been decoded and a record generated, but
662 		 * synthesizing a sample for it is deferred until this point, so
663 		 * the leftover record must be handled first.
664 		 * 2. After decoding trace data, the record timestamp must be
665 		 * compared with the timestamp of the incoming perf event; if the
666 		 * record is later, bail out and push the record onto the
667 		 * auxtrace heap, so that synthesizing its sample is deferred
668 		 * until this function runs again. This correlates samples
669 		 * between Arm SPE trace data and other perf events with correct
670 		 * time ordering.
671 		 */
672 
673 		/*
674 		 * Update pid/tid info.
675 		 */
676 		record = &speq->decoder->record;
677 		if (!spe->timeless_decoding && record->context_id != (u64)-1) {
678 			ret = arm_spe_set_tid(speq, record->context_id);
679 			if (ret)
680 				return ret;
681 
682 			spe->use_ctx_pkt_for_pid = true;
683 		}
684 
685 		ret = arm_spe_sample(speq);
686 		if (ret)
687 			return ret;
688 
689 		ret = arm_spe_decode(speq->decoder);
690 		if (!ret) {
691 			pr_debug("No data or all data has been processed.\n");
692 			return 1;
693 		}
694 
695 		/*
696 		 * If an error is detected while decoding SPE trace data, continue
697 		 * to the next chunk of trace data to find more records.
698 		 */
699 		if (ret < 0)
700 			continue;
701 
702 		record = &speq->decoder->record;
703 
704 		/* Update timestamp for the last record */
705 		if (record->timestamp > speq->timestamp)
706 			speq->timestamp = record->timestamp;
707 
708 		/*
709 		 * If the timestamp of the queue is later than the timestamp of
710 		 * the incoming perf event, bail out so the perf event can be
711 		 * processed first.
712 		 */
713 		if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
714 			*timestamp = speq->timestamp;
715 			return 0;
716 		}
717 	}
718 
719 	return 0;
720 }
721 
722 static int arm_spe__setup_queue(struct arm_spe *spe,
723 			       struct auxtrace_queue *queue,
724 			       unsigned int queue_nr)
725 {
726 	struct arm_spe_queue *speq = queue->priv;
727 	struct arm_spe_record *record;
728 
729 	if (list_empty(&queue->head) || speq)
730 		return 0;
731 
732 	speq = arm_spe__alloc_queue(spe, queue_nr);
733 
734 	if (!speq)
735 		return -ENOMEM;
736 
737 	queue->priv = speq;
738 
739 	if (queue->cpu != -1)
740 		speq->cpu = queue->cpu;
741 
742 	if (!speq->on_heap) {
743 		int ret;
744 
745 		if (spe->timeless_decoding)
746 			return 0;
747 
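		/* Skip over decode errors until a valid record is found or the data is exhausted */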
748 retry:
749 		ret = arm_spe_decode(speq->decoder);
750 
751 		if (!ret)
752 			return 0;
753 
754 		if (ret < 0)
755 			goto retry;
756 
757 		record = &speq->decoder->record;
758 
759 		speq->timestamp = record->timestamp;
760 		ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
761 		if (ret)
762 			return ret;
763 		speq->on_heap = true;
764 	}
765 
766 	return 0;
767 }
768 
769 static int arm_spe__setup_queues(struct arm_spe *spe)
770 {
771 	unsigned int i;
772 	int ret;
773 
774 	for (i = 0; i < spe->queues.nr_queues; i++) {
775 		ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
776 		if (ret)
777 			return ret;
778 	}
779 
780 	return 0;
781 }
782 
783 static int arm_spe__update_queues(struct arm_spe *spe)
784 {
785 	if (spe->queues.new_data) {
786 		spe->queues.new_data = false;
787 		return arm_spe__setup_queues(spe);
788 	}
789 
790 	return 0;
791 }
792 
793 static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
794 {
795 	struct evsel *evsel;
796 	struct evlist *evlist = spe->session->evlist;
797 	bool timeless_decoding = true;
798 
799 	/*
800 	 * Cycle through the list of events and disable timeless decoding if
801 	 * any of them has the time bit set.
802 	 */
803 	evlist__for_each_entry(evlist, evsel) {
804 		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
805 			timeless_decoding = false;
806 	}
807 
808 	return timeless_decoding;
809 }
810 
811 static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
812 {
813 	unsigned int queue_nr;
814 	u64 ts;
815 	int ret;
816 
817 	while (1) {
818 		struct auxtrace_queue *queue;
819 		struct arm_spe_queue *speq;
820 
821 		if (!spe->heap.heap_cnt)
822 			return 0;
823 
824 		if (spe->heap.heap_array[0].ordinal >= timestamp)
825 			return 0;
826 
827 		queue_nr = spe->heap.heap_array[0].queue_nr;
828 		queue = &spe->queues.queue_array[queue_nr];
829 		speq = queue->priv;
830 
831 		auxtrace_heap__pop(&spe->heap);
832 
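		/* Run the decoder only up to the timestamp of the next queue on the heap, capped at the requested timestamp */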
833 		if (spe->heap.heap_cnt) {
834 			ts = spe->heap.heap_array[0].ordinal + 1;
835 			if (ts > timestamp)
836 				ts = timestamp;
837 		} else {
838 			ts = timestamp;
839 		}
840 
841 		/*
842 		 * A previous context-switch event has set pid/tid in the machine's context, so
843 		 * here we need to update the pid/tid in the thread and SPE queue.
844 		 */
845 		if (!spe->use_ctx_pkt_for_pid)
846 			arm_spe_set_pid_tid_cpu(spe, queue);
847 
848 		ret = arm_spe_run_decoder(speq, &ts);
849 		if (ret < 0) {
850 			auxtrace_heap__add(&spe->heap, queue_nr, ts);
851 			return ret;
852 		}
853 
854 		if (!ret) {
855 			ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
856 			if (ret < 0)
857 				return ret;
858 		} else {
859 			speq->on_heap = false;
860 		}
861 	}
862 
863 	return 0;
864 }
865 
866 static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
867 					    u64 time_)
868 {
869 	struct auxtrace_queues *queues = &spe->queues;
870 	unsigned int i;
871 	u64 ts = 0;
872 
873 	for (i = 0; i < queues->nr_queues; i++) {
874 		struct auxtrace_queue *queue = &spe->queues.queue_array[i];
875 		struct arm_spe_queue *speq = queue->priv;
876 
877 		if (speq && (tid == -1 || speq->tid == tid)) {
878 			speq->time = time_;
879 			arm_spe_set_pid_tid_cpu(spe, queue);
880 			arm_spe_run_decoder(speq, &ts);
881 		}
882 	}
883 	return 0;
884 }
885 
886 static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
887 				  struct perf_sample *sample)
888 {
889 	pid_t pid, tid;
890 	int cpu;
891 
892 	if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
893 		return 0;
894 
895 	pid = event->context_switch.next_prev_pid;
896 	tid = event->context_switch.next_prev_tid;
897 	cpu = sample->cpu;
898 
899 	if (tid == -1)
900 		pr_warning("context_switch event has no tid\n");
901 
902 	return machine__set_current_tid(spe->machine, cpu, pid, tid);
903 }
904 
905 static int arm_spe_process_event(struct perf_session *session,
906 				 union perf_event *event,
907 				 struct perf_sample *sample,
908 				 struct perf_tool *tool)
909 {
910 	int err = 0;
911 	u64 timestamp;
912 	struct arm_spe *spe = container_of(session->auxtrace,
913 			struct arm_spe, auxtrace);
914 
915 	if (dump_trace)
916 		return 0;
917 
918 	if (!tool->ordered_events) {
919 		pr_err("SPE trace requires ordered events\n");
920 		return -EINVAL;
921 	}
922 
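	/* Convert the perf sample time back into the raw counter domain used by SPE timestamps */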
923 	if (sample->time && (sample->time != (u64) -1))
924 		timestamp = perf_time_to_tsc(sample->time, &spe->tc);
925 	else
926 		timestamp = 0;
927 
928 	if (timestamp || spe->timeless_decoding) {
929 		err = arm_spe__update_queues(spe);
930 		if (err)
931 			return err;
932 	}
933 
934 	if (spe->timeless_decoding) {
935 		if (event->header.type == PERF_RECORD_EXIT) {
936 			err = arm_spe_process_timeless_queues(spe,
937 					event->fork.tid,
938 					sample->time);
939 		}
940 	} else if (timestamp) {
941 		err = arm_spe_process_queues(spe, timestamp);
942 		if (err)
943 			return err;
944 
945 		if (!spe->use_ctx_pkt_for_pid &&
946 		    (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
947 		    event->header.type == PERF_RECORD_SWITCH))
948 			err = arm_spe_context_switch(spe, event, sample);
949 	}
950 
951 	return err;
952 }
953 
954 static int arm_spe_process_auxtrace_event(struct perf_session *session,
955 					  union perf_event *event,
956 					  struct perf_tool *tool __maybe_unused)
957 {
958 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
959 					     auxtrace);
960 
961 	if (!spe->data_queued) {
962 		struct auxtrace_buffer *buffer;
963 		off_t data_offset;
964 		int fd = perf_data__fd(session->data);
965 		int err;
966 
967 		if (perf_data__is_pipe(session->data)) {
968 			data_offset = 0;
969 		} else {
970 			data_offset = lseek(fd, 0, SEEK_CUR);
971 			if (data_offset == -1)
972 				return -errno;
973 		}
974 
975 		err = auxtrace_queues__add_event(&spe->queues, session, event,
976 				data_offset, &buffer);
977 		if (err)
978 			return err;
979 
980 		/* Dump here now that we have copied a piped trace out of the pipe */
981 		if (dump_trace) {
982 			if (auxtrace_buffer__get_data(buffer, fd)) {
983 				arm_spe_dump_event(spe, buffer->data,
984 						buffer->size);
985 				auxtrace_buffer__put_data(buffer);
986 			}
987 		}
988 	}
989 
990 	return 0;
991 }
992 
993 static int arm_spe_flush(struct perf_session *session __maybe_unused,
994 			 struct perf_tool *tool __maybe_unused)
995 {
996 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
997 			auxtrace);
998 	int ret;
999 
1000 	if (dump_trace)
1001 		return 0;
1002 
1003 	if (!tool->ordered_events)
1004 		return -EINVAL;
1005 
1006 	ret = arm_spe__update_queues(spe);
1007 	if (ret < 0)
1008 		return ret;
1009 
1010 	if (spe->timeless_decoding)
1011 		return arm_spe_process_timeless_queues(spe, -1,
1012 				MAX_TIMESTAMP - 1);
1013 
1014 	ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
1015 	if (ret)
1016 		return ret;
1017 
1018 	if (!spe->use_ctx_pkt_for_pid)
1019 		ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
1020 			    "Matching of TIDs to SPE events could be inaccurate.\n");
1021 
1022 	return 0;
1023 }
1024 
1025 static void arm_spe_free_queue(void *priv)
1026 {
1027 	struct arm_spe_queue *speq = priv;
1028 
1029 	if (!speq)
1030 		return;
1031 	thread__zput(speq->thread);
1032 	arm_spe_decoder_free(speq->decoder);
1033 	zfree(&speq->event_buf);
1034 	free(speq);
1035 }
1036 
1037 static void arm_spe_free_events(struct perf_session *session)
1038 {
1039 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1040 					     auxtrace);
1041 	struct auxtrace_queues *queues = &spe->queues;
1042 	unsigned int i;
1043 
1044 	for (i = 0; i < queues->nr_queues; i++) {
1045 		arm_spe_free_queue(queues->queue_array[i].priv);
1046 		queues->queue_array[i].priv = NULL;
1047 	}
1048 	auxtrace_queues__free(queues);
1049 }
1050 
1051 static void arm_spe_free(struct perf_session *session)
1052 {
1053 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1054 					     auxtrace);
1055 
1056 	auxtrace_heap__free(&spe->heap);
1057 	arm_spe_free_events(session);
1058 	session->auxtrace = NULL;
1059 	free(spe);
1060 }
1061 
1062 static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
1063 				      struct evsel *evsel)
1064 {
1065 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);
1066 
1067 	return evsel->core.attr.type == spe->pmu_type;
1068 }
1069 
1070 static const char * const arm_spe_info_fmts[] = {
1071 	[ARM_SPE_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
1072 };
1073 
1074 static void arm_spe_print_info(__u64 *arr)
1075 {
1076 	if (!dump_trace)
1077 		return;
1078 
1079 	fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
1080 }
1081 
1082 struct arm_spe_synth {
1083 	struct perf_tool dummy_tool;
1084 	struct perf_session *session;
1085 };
1086 
1087 static int arm_spe_event_synth(struct perf_tool *tool,
1088 			       union perf_event *event,
1089 			       struct perf_sample *sample __maybe_unused,
1090 			       struct machine *machine __maybe_unused)
1091 {
1092 	struct arm_spe_synth *arm_spe_synth =
1093 		      container_of(tool, struct arm_spe_synth, dummy_tool);
1094 
1095 	return perf_session__deliver_synth_event(arm_spe_synth->session,
1096 						 event, NULL);
1097 }
1098 
1099 static int arm_spe_synth_event(struct perf_session *session,
1100 			       struct perf_event_attr *attr, u64 id)
1101 {
1102 	struct arm_spe_synth arm_spe_synth;
1103 
1104 	memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
1105 	arm_spe_synth.session = session;
1106 
1107 	return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
1108 					   &id, arm_spe_event_synth);
1109 }
1110 
1111 static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
1112 				    const char *name)
1113 {
1114 	struct evsel *evsel;
1115 
1116 	evlist__for_each_entry(evlist, evsel) {
1117 		if (evsel->core.id && evsel->core.id[0] == id) {
1118 			if (evsel->name)
1119 				zfree(&evsel->name);
1120 			evsel->name = strdup(name);
1121 			break;
1122 		}
1123 	}
1124 }
1125 
1126 static int
1127 arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
1128 {
1129 	struct evlist *evlist = session->evlist;
1130 	struct evsel *evsel;
1131 	struct perf_event_attr attr;
1132 	bool found = false;
1133 	u64 id;
1134 	int err;
1135 
1136 	evlist__for_each_entry(evlist, evsel) {
1137 		if (evsel->core.attr.type == spe->pmu_type) {
1138 			found = true;
1139 			break;
1140 		}
1141 	}
1142 
1143 	if (!found) {
1144 		pr_debug("No selected events with SPE trace data\n");
1145 		return 0;
1146 	}
1147 
1148 	memset(&attr, 0, sizeof(struct perf_event_attr));
1149 	attr.size = sizeof(struct perf_event_attr);
1150 	attr.type = PERF_TYPE_HARDWARE;
1151 	attr.sample_type = evsel->core.attr.sample_type &
1152 				(PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
1153 	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1154 			    PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
1155 			    PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
1156 	if (spe->timeless_decoding)
1157 		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1158 	else
1159 		attr.sample_type |= PERF_SAMPLE_TIME;
1160 
1161 	spe->sample_type = attr.sample_type;
1162 
1163 	attr.exclude_user = evsel->core.attr.exclude_user;
1164 	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1165 	attr.exclude_hv = evsel->core.attr.exclude_hv;
1166 	attr.exclude_host = evsel->core.attr.exclude_host;
1167 	attr.exclude_guest = evsel->core.attr.exclude_guest;
1168 	attr.sample_id_all = evsel->core.attr.sample_id_all;
1169 	attr.read_format = evsel->core.attr.read_format;
1170 
1171 	/* create new id val to be a fixed offset from evsel id */
1172 	id = evsel->core.id[0] + 1000000000;
1173 
1174 	if (!id)
1175 		id = 1;
1176 
1177 	if (spe->synth_opts.flc) {
1178 		spe->sample_flc = true;
1179 
1180 		/* Level 1 data cache miss */
1181 		err = arm_spe_synth_event(session, &attr, id);
1182 		if (err)
1183 			return err;
1184 		spe->l1d_miss_id = id;
1185 		arm_spe_set_event_name(evlist, id, "l1d-miss");
1186 		id += 1;
1187 
1188 		/* Level 1 data cache access */
1189 		err = arm_spe_synth_event(session, &attr, id);
1190 		if (err)
1191 			return err;
1192 		spe->l1d_access_id = id;
1193 		arm_spe_set_event_name(evlist, id, "l1d-access");
1194 		id += 1;
1195 	}
1196 
1197 	if (spe->synth_opts.llc) {
1198 		spe->sample_llc = true;
1199 
1200 		/* Last level cache miss */
1201 		err = arm_spe_synth_event(session, &attr, id);
1202 		if (err)
1203 			return err;
1204 		spe->llc_miss_id = id;
1205 		arm_spe_set_event_name(evlist, id, "llc-miss");
1206 		id += 1;
1207 
1208 		/* Last level cache access */
1209 		err = arm_spe_synth_event(session, &attr, id);
1210 		if (err)
1211 			return err;
1212 		spe->llc_access_id = id;
1213 		arm_spe_set_event_name(evlist, id, "llc-access");
1214 		id += 1;
1215 	}
1216 
1217 	if (spe->synth_opts.tlb) {
1218 		spe->sample_tlb = true;
1219 
1220 		/* TLB miss */
1221 		err = arm_spe_synth_event(session, &attr, id);
1222 		if (err)
1223 			return err;
1224 		spe->tlb_miss_id = id;
1225 		arm_spe_set_event_name(evlist, id, "tlb-miss");
1226 		id += 1;
1227 
1228 		/* TLB access */
1229 		err = arm_spe_synth_event(session, &attr, id);
1230 		if (err)
1231 			return err;
1232 		spe->tlb_access_id = id;
1233 		arm_spe_set_event_name(evlist, id, "tlb-access");
1234 		id += 1;
1235 	}
1236 
1237 	if (spe->synth_opts.branches) {
1238 		spe->sample_branch = true;
1239 
1240 		/* Branch miss */
1241 		err = arm_spe_synth_event(session, &attr, id);
1242 		if (err)
1243 			return err;
1244 		spe->branch_miss_id = id;
1245 		arm_spe_set_event_name(evlist, id, "branch-miss");
1246 		id += 1;
1247 	}
1248 
1249 	if (spe->synth_opts.remote_access) {
1250 		spe->sample_remote_access = true;
1251 
1252 		/* Remote access */
1253 		err = arm_spe_synth_event(session, &attr, id);
1254 		if (err)
1255 			return err;
1256 		spe->remote_access_id = id;
1257 		arm_spe_set_event_name(evlist, id, "remote-access");
1258 		id += 1;
1259 	}
1260 
1261 	if (spe->synth_opts.mem) {
1262 		spe->sample_memory = true;
1263 
1264 		err = arm_spe_synth_event(session, &attr, id);
1265 		if (err)
1266 			return err;
1267 		spe->memory_id = id;
1268 		arm_spe_set_event_name(evlist, id, "memory");
1269 		id += 1;
1270 	}
1271 
1272 	if (spe->synth_opts.instructions) {
1273 		if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
1274 			pr_warning("Only instruction-based sampling period is currently supported by Arm SPE.\n");
1275 			goto synth_instructions_out;
1276 		}
1277 		if (spe->synth_opts.period > 1)
1278 			pr_warning("Arm SPE has a hardware-based sample period.\n"
1279 				   "Additional instruction events will be discarded by --itrace\n");
1280 
1281 		spe->sample_instructions = true;
1282 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1283 		attr.sample_period = spe->synth_opts.period;
1284 		spe->instructions_sample_period = attr.sample_period;
1285 		err = arm_spe_synth_event(session, &attr, id);
1286 		if (err)
1287 			return err;
1288 		spe->instructions_id = id;
1289 		arm_spe_set_event_name(evlist, id, "instructions");
1290 	}
1291 synth_instructions_out:
1292 
1293 	return 0;
1294 }
1295 
1296 int arm_spe_process_auxtrace_info(union perf_event *event,
1297 				  struct perf_session *session)
1298 {
1299 	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
1300 	size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
1301 	struct perf_record_time_conv *tc = &session->time_conv;
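	/* On arm64 the cpuid string carries the MIDR value in hex, so parse it back into a number */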
1302 	const char *cpuid = perf_env__cpuid(session->evlist->env);
1303 	u64 midr = strtol(cpuid, NULL, 16);
1304 	struct arm_spe *spe;
1305 	int err;
1306 
1307 	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
1308 					min_sz)
1309 		return -EINVAL;
1310 
1311 	spe = zalloc(sizeof(struct arm_spe));
1312 	if (!spe)
1313 		return -ENOMEM;
1314 
1315 	err = auxtrace_queues__init(&spe->queues);
1316 	if (err)
1317 		goto err_free;
1318 
1319 	spe->session = session;
1320 	spe->machine = &session->machines.host; /* No kvm support */
1321 	spe->auxtrace_type = auxtrace_info->type;
1322 	spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
1323 	spe->midr = midr;
1324 
1325 	spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
1326 
1327 	/*
1328 	 * The synthesized event PERF_RECORD_TIME_CONV has already been handled
1329 	 * and the parameters for the hardware clock are stored in the session
1330 	 * context.  Pass these parameters to the struct perf_tsc_conversion
1331 	 * in "spe->tc", which is used later for conversion between the clock
1332 	 * counter and timestamps.
1333 	 *
1334 	 * For backward compatibility, copy the fields starting from
1335 	 * "time_cycles" only if they are contained in the event.
1336 	 */
1337 	spe->tc.time_shift = tc->time_shift;
1338 	spe->tc.time_mult = tc->time_mult;
1339 	spe->tc.time_zero = tc->time_zero;
1340 
1341 	if (event_contains(*tc, time_cycles)) {
1342 		spe->tc.time_cycles = tc->time_cycles;
1343 		spe->tc.time_mask = tc->time_mask;
1344 		spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
1345 		spe->tc.cap_user_time_short = tc->cap_user_time_short;
1346 	}
1347 
1348 	spe->auxtrace.process_event = arm_spe_process_event;
1349 	spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
1350 	spe->auxtrace.flush_events = arm_spe_flush;
1351 	spe->auxtrace.free_events = arm_spe_free_events;
1352 	spe->auxtrace.free = arm_spe_free;
1353 	spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
1354 	session->auxtrace = &spe->auxtrace;
1355 
1356 	arm_spe_print_info(&auxtrace_info->priv[0]);
1357 
1358 	if (dump_trace)
1359 		return 0;
1360 
1361 	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
1362 		spe->synth_opts = *session->itrace_synth_opts;
1363 	else
1364 		itrace_synth_opts__set_default(&spe->synth_opts, false);
1365 
1366 	err = arm_spe_synth_events(spe, session);
1367 	if (err)
1368 		goto err_free_queues;
1369 
1370 	err = auxtrace_queues__process_index(&spe->queues, session);
1371 	if (err)
1372 		goto err_free_queues;
1373 
1374 	if (spe->queues.populated)
1375 		spe->data_queued = true;
1376 
1377 	return 0;
1378 
1379 err_free_queues:
1380 	auxtrace_queues__free(&spe->queues);
1381 	session->auxtrace = NULL;
1382 err_free:
1383 	free(spe);
1384 	return err;
1385 }
1386