xref: /openbmc/linux/tools/perf/util/cs-etm.c (revision 66c98360)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015-2018 Linaro Limited.
4  *
5  * Author: Tor Jeremiassen <tor@ti.com>
6  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/coresight-pmu.h>
11 #include <linux/err.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
16 
17 #include <opencsd/ocsd_if_types.h>
18 #include <stdlib.h>
19 
20 #include "auxtrace.h"
21 #include "color.h"
22 #include "cs-etm.h"
23 #include "cs-etm-decoder/cs-etm-decoder.h"
24 #include "debug.h"
25 #include "dso.h"
26 #include "evlist.h"
27 #include "intlist.h"
28 #include "machine.h"
29 #include "map.h"
30 #include "perf.h"
31 #include "session.h"
32 #include "map_symbol.h"
33 #include "branch.h"
34 #include "symbol.h"
35 #include "tool.h"
36 #include "thread.h"
37 #include "thread-stack.h"
38 #include "tsc.h"
39 #include <tools/libc_compat.h>
40 #include "util/synthetic-events.h"
41 #include "util/util.h"
42 
43 struct cs_etm_auxtrace {
44 	struct auxtrace auxtrace;
45 	struct auxtrace_queues queues;
46 	struct auxtrace_heap heap;
47 	struct itrace_synth_opts synth_opts;
48 	struct perf_session *session;
49 	struct machine *machine;
50 	struct thread *unknown_thread;
51 	struct perf_tsc_conversion tc;
52 
53 	/*
54 	 * Timeless has no timestamps in the trace so overlapping mmap lookups
55 	 * are less accurate but produces smaller trace data. We use context IDs
56 	 * in the trace instead of matching timestamps with fork records so
57 	 * they're not really needed in the general case. Overlapping mmaps
58 	 * happen in cases like between a fork and an exec.
59 	 */
60 	bool timeless_decoding;
61 
62 	/*
63 	 * Per-thread ignores the trace channel ID and instead assumes that
64 	 * everything in a buffer comes from the same process regardless of
65 	 * which CPU it ran on. It also implies no context IDs so the TID is
66 	 * taken from the auxtrace buffer.
67 	 */
68 	bool per_thread_decoding;
69 	bool snapshot_mode;
70 	bool data_queued;
71 	bool has_virtual_ts; /* Virtual/Kernel timestamps in the trace. */
72 
73 	int num_cpu;
74 	u64 latest_kernel_timestamp;
75 	u32 auxtrace_type;
76 	u64 branches_sample_type;
77 	u64 branches_id;
78 	u64 instructions_sample_type;
79 	u64 instructions_sample_period;
80 	u64 instructions_id;
81 	u64 **metadata;
82 	unsigned int pmu_type;
83 };
84 
85 struct cs_etm_traceid_queue {
86 	u8 trace_chan_id;
87 	pid_t pid, tid;
88 	u64 period_instructions;
89 	size_t last_branch_pos;
90 	union perf_event *event_buf;
91 	struct thread *thread;
92 	struct branch_stack *last_branch;
93 	struct branch_stack *last_branch_rb;
94 	struct cs_etm_packet *prev_packet;
95 	struct cs_etm_packet *packet;
96 	struct cs_etm_packet_queue packet_queue;
97 };
98 
99 struct cs_etm_queue {
100 	struct cs_etm_auxtrace *etm;
101 	struct cs_etm_decoder *decoder;
102 	struct auxtrace_buffer *buffer;
103 	unsigned int queue_nr;
104 	u8 pending_timestamp_chan_id;
105 	u64 offset;
106 	const unsigned char *buf;
107 	size_t buf_len, buf_used;
108 	/* Conversion between traceID and index in traceid_queues array */
109 	struct intlist *traceid_queues_list;
110 	struct cs_etm_traceid_queue **traceid_queues;
111 };
112 
113 /* RB tree for quick conversion between traceID and metadata pointers */
114 static struct intlist *traceid_list;
115 
116 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
117 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
118 					   pid_t tid);
119 static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
120 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
121 
122 /* PTMs ETMIDR [11:8] set to b0011 */
123 #define ETMIDR_PTM_VERSION 0x00000300
124 
125 /*
126  * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
127  * work with.  One option is to modify to auxtrace_heap_XYZ() API or simply
128  * encode the etm queue number as the upper 16 bit and the channel as
129  * the lower 16 bit.
130  */
131 #define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
132 		      (queue_nr << 16 | trace_chan_id)
133 #define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
134 #define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
135 
136 static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
137 {
138 	etmidr &= ETMIDR_PTM_VERSION;
139 
140 	if (etmidr == ETMIDR_PTM_VERSION)
141 		return CS_ETM_PROTO_PTM;
142 
143 	return CS_ETM_PROTO_ETMV3;
144 }
145 
146 static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
147 {
148 	struct int_node *inode;
149 	u64 *metadata;
150 
151 	inode = intlist__find(traceid_list, trace_chan_id);
152 	if (!inode)
153 		return -EINVAL;
154 
155 	metadata = inode->priv;
156 	*magic = metadata[CS_ETM_MAGIC];
157 	return 0;
158 }
159 
160 int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
161 {
162 	struct int_node *inode;
163 	u64 *metadata;
164 
165 	inode = intlist__find(traceid_list, trace_chan_id);
166 	if (!inode)
167 		return -EINVAL;
168 
169 	metadata = inode->priv;
170 	*cpu = (int)metadata[CS_ETM_CPU];
171 	return 0;
172 }
173 
174 /*
175  * The returned PID format is presented by two bits:
176  *
177  *   Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
178  *   Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
179  *
180  * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
181  * are enabled at the same time when the session runs on an EL2 kernel.
182  * This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be
183  * recorded in the trace data, the tool will selectively use
184  * CONTEXTIDR_EL2 as PID.
185  */
186 int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
187 {
188 	struct int_node *inode;
189 	u64 *metadata, val;
190 
191 	inode = intlist__find(traceid_list, trace_chan_id);
192 	if (!inode)
193 		return -EINVAL;
194 
195 	metadata = inode->priv;
196 
197 	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
198 		val = metadata[CS_ETM_ETMCR];
199 		/* CONTEXTIDR is traced */
200 		if (val & BIT(ETM_OPT_CTXTID))
201 			*pid_fmt = BIT(ETM_OPT_CTXTID);
202 	} else {
203 		val = metadata[CS_ETMV4_TRCCONFIGR];
204 		/* CONTEXTIDR_EL2 is traced */
205 		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
206 			*pid_fmt = BIT(ETM_OPT_CTXTID2);
207 		/* CONTEXTIDR_EL1 is traced */
208 		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
209 			*pid_fmt = BIT(ETM_OPT_CTXTID);
210 	}
211 
212 	return 0;
213 }
214 
215 static int cs_etm__map_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
216 {
217 	struct int_node *inode;
218 
219 	/* Get an RB node for this CPU */
220 	inode = intlist__findnew(traceid_list, trace_chan_id);
221 
222 	/* Something went wrong, no need to continue */
223 	if (!inode)
224 		return -ENOMEM;
225 
226 	/*
227 	 * The node for that CPU should not be taken.
228 	 * Back out if that's the case.
229 	 */
230 	if (inode->priv)
231 		return -EINVAL;
232 
233 	/* All good, associate the traceID with the metadata pointer */
234 	inode->priv = cpu_metadata;
235 
236 	return 0;
237 }
238 
239 static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata)
240 {
241 	u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
242 
243 	switch (cs_etm_magic) {
244 	case __perf_cs_etmv3_magic:
245 		*trace_chan_id = (u8)(cpu_metadata[CS_ETM_ETMTRACEIDR] &
246 				      CORESIGHT_TRACE_ID_VAL_MASK);
247 		break;
248 	case __perf_cs_etmv4_magic:
249 	case __perf_cs_ete_magic:
250 		*trace_chan_id = (u8)(cpu_metadata[CS_ETMV4_TRCTRACEIDR] &
251 				      CORESIGHT_TRACE_ID_VAL_MASK);
252 		break;
253 	default:
254 		return -EINVAL;
255 	}
256 	return 0;
257 }
258 
259 /*
260  * update metadata trace ID from the value found in the AUX_HW_INFO packet.
261  * This will also clear the CORESIGHT_TRACE_ID_UNUSED_FLAG flag if present.
262  */
263 static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
264 {
265 	u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];
266 
267 	switch (cs_etm_magic) {
268 	case __perf_cs_etmv3_magic:
269 		 cpu_metadata[CS_ETM_ETMTRACEIDR] = trace_chan_id;
270 		break;
271 	case __perf_cs_etmv4_magic:
272 	case __perf_cs_ete_magic:
273 		cpu_metadata[CS_ETMV4_TRCTRACEIDR] = trace_chan_id;
274 		break;
275 
276 	default:
277 		return -EINVAL;
278 	}
279 	return 0;
280 }
281 
282 /*
283  * FIELD_GET (linux/bitfield.h) not available outside kernel code,
284  * and the header contains too many dependencies to just copy over,
285  * so roll our own based on the original
286  */
287 #define __bf_shf(x) (__builtin_ffsll(x) - 1)
288 #define FIELD_GET(_mask, _reg)						\
289 	({								\
290 		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
291 	})
292 
293 /*
294  * Get a metadata for a specific cpu from an array.
295  *
296  */
297 static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
298 {
299 	int i;
300 	u64 *metadata = NULL;
301 
302 	for (i = 0; i < etm->num_cpu; i++) {
303 		if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
304 			metadata = etm->metadata[i];
305 			break;
306 		}
307 	}
308 
309 	return metadata;
310 }
311 
312 /*
313  * Handle the PERF_RECORD_AUX_OUTPUT_HW_ID event.
314  *
315  * The payload associates the Trace ID and the CPU.
316  * The routine is tolerant of seeing multiple packets with the same association,
317  * but a CPU / Trace ID association changing during a session is an error.
318  */
319 static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
320 					    union perf_event *event)
321 {
322 	struct cs_etm_auxtrace *etm;
323 	struct perf_sample sample;
324 	struct int_node *inode;
325 	struct evsel *evsel;
326 	u64 *cpu_data;
327 	u64 hw_id;
328 	int cpu, version, err;
329 	u8 trace_chan_id, curr_chan_id;
330 
331 	/* extract and parse the HW ID */
332 	hw_id = event->aux_output_hw_id.hw_id;
333 	version = FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, hw_id);
334 	trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
335 
336 	/* check that we can handle this version */
337 	if (version > CS_AUX_HW_ID_CURR_VERSION)
338 		return -EINVAL;
339 
340 	/* get access to the etm metadata */
341 	etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
342 	if (!etm || !etm->metadata)
343 		return -EINVAL;
344 
345 	/* parse the sample to get the CPU */
346 	evsel = evlist__event2evsel(session->evlist, event);
347 	if (!evsel)
348 		return -EINVAL;
349 	err = evsel__parse_sample(evsel, event, &sample);
350 	if (err)
351 		return err;
352 	cpu = sample.cpu;
353 	if (cpu == -1) {
354 		/* no CPU in the sample - possibly recorded with an old version of perf */
355 		pr_err("CS_ETM: no CPU AUX_OUTPUT_HW_ID sample. Use compatible perf to record.");
356 		return -EINVAL;
357 	}
358 
359 	/* See if the ID is mapped to a CPU, and it matches the current CPU */
360 	inode = intlist__find(traceid_list, trace_chan_id);
361 	if (inode) {
362 		cpu_data = inode->priv;
363 		if ((int)cpu_data[CS_ETM_CPU] != cpu) {
364 			pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
365 			return -EINVAL;
366 		}
367 
368 		/* check that the mapped ID matches */
369 		err = cs_etm__metadata_get_trace_id(&curr_chan_id, cpu_data);
370 		if (err)
371 			return err;
372 		if (curr_chan_id != trace_chan_id) {
373 			pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
374 			return -EINVAL;
375 		}
376 
377 		/* mapped and matched - return OK */
378 		return 0;
379 	}
380 
381 	cpu_data = get_cpu_data(etm, cpu);
382 	if (cpu_data == NULL)
383 		return err;
384 
385 	/* not one we've seen before - lets map it */
386 	err = cs_etm__map_trace_id(trace_chan_id, cpu_data);
387 	if (err)
388 		return err;
389 
390 	/*
391 	 * if we are picking up the association from the packet, need to plug
392 	 * the correct trace ID into the metadata for setting up decoders later.
393 	 */
394 	err = cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
395 	return err;
396 }
397 
398 void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
399 					      u8 trace_chan_id)
400 {
401 	/*
402 	 * When a timestamp packet is encountered the backend code
403 	 * is stopped so that the front end has time to process packets
404 	 * that were accumulated in the traceID queue.  Since there can
405 	 * be more than one channel per cs_etm_queue, we need to specify
406 	 * what traceID queue needs servicing.
407 	 */
408 	etmq->pending_timestamp_chan_id = trace_chan_id;
409 }
410 
411 static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
412 				      u8 *trace_chan_id)
413 {
414 	struct cs_etm_packet_queue *packet_queue;
415 
416 	if (!etmq->pending_timestamp_chan_id)
417 		return 0;
418 
419 	if (trace_chan_id)
420 		*trace_chan_id = etmq->pending_timestamp_chan_id;
421 
422 	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
423 						     etmq->pending_timestamp_chan_id);
424 	if (!packet_queue)
425 		return 0;
426 
427 	/* Acknowledge pending status */
428 	etmq->pending_timestamp_chan_id = 0;
429 
430 	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
431 	return packet_queue->cs_timestamp;
432 }
433 
434 static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
435 {
436 	int i;
437 
438 	queue->head = 0;
439 	queue->tail = 0;
440 	queue->packet_count = 0;
441 	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
442 		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
443 		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
444 		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
445 		queue->packet_buffer[i].instr_count = 0;
446 		queue->packet_buffer[i].last_instr_taken_branch = false;
447 		queue->packet_buffer[i].last_instr_size = 0;
448 		queue->packet_buffer[i].last_instr_type = 0;
449 		queue->packet_buffer[i].last_instr_subtype = 0;
450 		queue->packet_buffer[i].last_instr_cond = 0;
451 		queue->packet_buffer[i].flags = 0;
452 		queue->packet_buffer[i].exception_number = UINT32_MAX;
453 		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
454 		queue->packet_buffer[i].cpu = INT_MIN;
455 	}
456 }
457 
458 static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
459 {
460 	int idx;
461 	struct int_node *inode;
462 	struct cs_etm_traceid_queue *tidq;
463 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
464 
465 	intlist__for_each_entry(inode, traceid_queues_list) {
466 		idx = (int)(intptr_t)inode->priv;
467 		tidq = etmq->traceid_queues[idx];
468 		cs_etm__clear_packet_queue(&tidq->packet_queue);
469 	}
470 }
471 
472 static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
473 				      struct cs_etm_traceid_queue *tidq,
474 				      u8 trace_chan_id)
475 {
476 	int rc = -ENOMEM;
477 	struct auxtrace_queue *queue;
478 	struct cs_etm_auxtrace *etm = etmq->etm;
479 
480 	cs_etm__clear_packet_queue(&tidq->packet_queue);
481 
482 	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
483 	tidq->tid = queue->tid;
484 	tidq->pid = -1;
485 	tidq->trace_chan_id = trace_chan_id;
486 
487 	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
488 	if (!tidq->packet)
489 		goto out;
490 
491 	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
492 	if (!tidq->prev_packet)
493 		goto out_free;
494 
495 	if (etm->synth_opts.last_branch) {
496 		size_t sz = sizeof(struct branch_stack);
497 
498 		sz += etm->synth_opts.last_branch_sz *
499 		      sizeof(struct branch_entry);
500 		tidq->last_branch = zalloc(sz);
501 		if (!tidq->last_branch)
502 			goto out_free;
503 		tidq->last_branch_rb = zalloc(sz);
504 		if (!tidq->last_branch_rb)
505 			goto out_free;
506 	}
507 
508 	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
509 	if (!tidq->event_buf)
510 		goto out_free;
511 
512 	return 0;
513 
514 out_free:
515 	zfree(&tidq->last_branch_rb);
516 	zfree(&tidq->last_branch);
517 	zfree(&tidq->prev_packet);
518 	zfree(&tidq->packet);
519 out:
520 	return rc;
521 }
522 
523 static struct cs_etm_traceid_queue
524 *cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
525 {
526 	int idx;
527 	struct int_node *inode;
528 	struct intlist *traceid_queues_list;
529 	struct cs_etm_traceid_queue *tidq, **traceid_queues;
530 	struct cs_etm_auxtrace *etm = etmq->etm;
531 
532 	if (etm->per_thread_decoding)
533 		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
534 
535 	traceid_queues_list = etmq->traceid_queues_list;
536 
537 	/*
538 	 * Check if the traceid_queue exist for this traceID by looking
539 	 * in the queue list.
540 	 */
541 	inode = intlist__find(traceid_queues_list, trace_chan_id);
542 	if (inode) {
543 		idx = (int)(intptr_t)inode->priv;
544 		return etmq->traceid_queues[idx];
545 	}
546 
547 	/* We couldn't find a traceid_queue for this traceID, allocate one */
548 	tidq = malloc(sizeof(*tidq));
549 	if (!tidq)
550 		return NULL;
551 
552 	memset(tidq, 0, sizeof(*tidq));
553 
554 	/* Get a valid index for the new traceid_queue */
555 	idx = intlist__nr_entries(traceid_queues_list);
556 	/* Memory for the inode is free'ed in cs_etm_free_traceid_queues () */
557 	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
558 	if (!inode)
559 		goto out_free;
560 
561 	/* Associate this traceID with this index */
562 	inode->priv = (void *)(intptr_t)idx;
563 
564 	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
565 		goto out_free;
566 
567 	/* Grow the traceid_queues array by one unit */
568 	traceid_queues = etmq->traceid_queues;
569 	traceid_queues = reallocarray(traceid_queues,
570 				      idx + 1,
571 				      sizeof(*traceid_queues));
572 
573 	/*
574 	 * On failure reallocarray() returns NULL and the original block of
575 	 * memory is left untouched.
576 	 */
577 	if (!traceid_queues)
578 		goto out_free;
579 
580 	traceid_queues[idx] = tidq;
581 	etmq->traceid_queues = traceid_queues;
582 
583 	return etmq->traceid_queues[idx];
584 
585 out_free:
586 	/*
587 	 * Function intlist__remove() removes the inode from the list
588 	 * and delete the memory associated to it.
589 	 */
590 	intlist__remove(traceid_queues_list, inode);
591 	free(tidq);
592 
593 	return NULL;
594 }
595 
596 struct cs_etm_packet_queue
597 *cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
598 {
599 	struct cs_etm_traceid_queue *tidq;
600 
601 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
602 	if (tidq)
603 		return &tidq->packet_queue;
604 
605 	return NULL;
606 }
607 
608 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
609 				struct cs_etm_traceid_queue *tidq)
610 {
611 	struct cs_etm_packet *tmp;
612 
613 	if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
614 	    etm->synth_opts.instructions) {
615 		/*
616 		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
617 		 * the next incoming packet.
618 		 */
619 		tmp = tidq->packet;
620 		tidq->packet = tidq->prev_packet;
621 		tidq->prev_packet = tmp;
622 	}
623 }
624 
625 static void cs_etm__packet_dump(const char *pkt_string)
626 {
627 	const char *color = PERF_COLOR_BLUE;
628 	int len = strlen(pkt_string);
629 
630 	if (len && (pkt_string[len-1] == '\n'))
631 		color_fprintf(stdout, color, "	%s", pkt_string);
632 	else
633 		color_fprintf(stdout, color, "	%s\n", pkt_string);
634 
635 	fflush(stdout);
636 }
637 
638 static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
639 					  struct cs_etm_auxtrace *etm, int idx,
640 					  u32 etmidr)
641 {
642 	u64 **metadata = etm->metadata;
643 
644 	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
645 	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
646 	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
647 }
648 
649 static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
650 					  struct cs_etm_auxtrace *etm, int idx)
651 {
652 	u64 **metadata = etm->metadata;
653 
654 	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
655 	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
656 	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
657 	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
658 	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
659 	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
660 	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
661 }
662 
663 static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
664 					  struct cs_etm_auxtrace *etm, int idx)
665 {
666 	u64 **metadata = etm->metadata;
667 
668 	t_params[idx].protocol = CS_ETM_PROTO_ETE;
669 	t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETE_TRCIDR0];
670 	t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETE_TRCIDR1];
671 	t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETE_TRCIDR2];
672 	t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETE_TRCIDR8];
673 	t_params[idx].ete.reg_configr = metadata[idx][CS_ETE_TRCCONFIGR];
674 	t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETE_TRCTRACEIDR];
675 	t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
676 }
677 
678 static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
679 				     struct cs_etm_auxtrace *etm,
680 				     int decoders)
681 {
682 	int i;
683 	u32 etmidr;
684 	u64 architecture;
685 
686 	for (i = 0; i < decoders; i++) {
687 		architecture = etm->metadata[i][CS_ETM_MAGIC];
688 
689 		switch (architecture) {
690 		case __perf_cs_etmv3_magic:
691 			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
692 			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
693 			break;
694 		case __perf_cs_etmv4_magic:
695 			cs_etm__set_trace_param_etmv4(t_params, etm, i);
696 			break;
697 		case __perf_cs_ete_magic:
698 			cs_etm__set_trace_param_ete(t_params, etm, i);
699 			break;
700 		default:
701 			return -EINVAL;
702 		}
703 	}
704 
705 	return 0;
706 }
707 
708 static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
709 				       struct cs_etm_queue *etmq,
710 				       enum cs_etm_decoder_operation mode,
711 				       bool formatted)
712 {
713 	int ret = -EINVAL;
714 
715 	if (!(mode < CS_ETM_OPERATION_MAX))
716 		goto out;
717 
718 	d_params->packet_printer = cs_etm__packet_dump;
719 	d_params->operation = mode;
720 	d_params->data = etmq;
721 	d_params->formatted = formatted;
722 	d_params->fsyncs = false;
723 	d_params->hsyncs = false;
724 	d_params->frame_aligned = true;
725 
726 	ret = 0;
727 out:
728 	return ret;
729 }
730 
731 static void cs_etm__dump_event(struct cs_etm_queue *etmq,
732 			       struct auxtrace_buffer *buffer)
733 {
734 	int ret;
735 	const char *color = PERF_COLOR_BLUE;
736 	size_t buffer_used = 0;
737 
738 	fprintf(stdout, "\n");
739 	color_fprintf(stdout, color,
740 		     ". ... CoreSight %s Trace data: size %#zx bytes\n",
741 		     cs_etm_decoder__get_name(etmq->decoder), buffer->size);
742 
743 	do {
744 		size_t consumed;
745 
746 		ret = cs_etm_decoder__process_data_block(
747 				etmq->decoder, buffer->offset,
748 				&((u8 *)buffer->data)[buffer_used],
749 				buffer->size - buffer_used, &consumed);
750 		if (ret)
751 			break;
752 
753 		buffer_used += consumed;
754 	} while (buffer_used < buffer->size);
755 
756 	cs_etm_decoder__reset(etmq->decoder);
757 }
758 
759 static int cs_etm__flush_events(struct perf_session *session,
760 				struct perf_tool *tool)
761 {
762 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
763 						   struct cs_etm_auxtrace,
764 						   auxtrace);
765 	if (dump_trace)
766 		return 0;
767 
768 	if (!tool->ordered_events)
769 		return -EINVAL;
770 
771 	if (etm->timeless_decoding) {
772 		/*
773 		 * Pass tid = -1 to process all queues. But likely they will have
774 		 * already been processed on PERF_RECORD_EXIT anyway.
775 		 */
776 		return cs_etm__process_timeless_queues(etm, -1);
777 	}
778 
779 	return cs_etm__process_timestamped_queues(etm);
780 }
781 
782 static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
783 {
784 	int idx;
785 	uintptr_t priv;
786 	struct int_node *inode, *tmp;
787 	struct cs_etm_traceid_queue *tidq;
788 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
789 
790 	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
791 		priv = (uintptr_t)inode->priv;
792 		idx = priv;
793 
794 		/* Free this traceid_queue from the array */
795 		tidq = etmq->traceid_queues[idx];
796 		thread__zput(tidq->thread);
797 		zfree(&tidq->event_buf);
798 		zfree(&tidq->last_branch);
799 		zfree(&tidq->last_branch_rb);
800 		zfree(&tidq->prev_packet);
801 		zfree(&tidq->packet);
802 		zfree(&tidq);
803 
804 		/*
805 		 * Function intlist__remove() removes the inode from the list
806 		 * and delete the memory associated to it.
807 		 */
808 		intlist__remove(traceid_queues_list, inode);
809 	}
810 
811 	/* Then the RB tree itself */
812 	intlist__delete(traceid_queues_list);
813 	etmq->traceid_queues_list = NULL;
814 
815 	/* finally free the traceid_queues array */
816 	zfree(&etmq->traceid_queues);
817 }
818 
819 static void cs_etm__free_queue(void *priv)
820 {
821 	struct cs_etm_queue *etmq = priv;
822 
823 	if (!etmq)
824 		return;
825 
826 	cs_etm_decoder__free(etmq->decoder);
827 	cs_etm__free_traceid_queues(etmq);
828 	free(etmq);
829 }
830 
831 static void cs_etm__free_events(struct perf_session *session)
832 {
833 	unsigned int i;
834 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
835 						   struct cs_etm_auxtrace,
836 						   auxtrace);
837 	struct auxtrace_queues *queues = &aux->queues;
838 
839 	for (i = 0; i < queues->nr_queues; i++) {
840 		cs_etm__free_queue(queues->queue_array[i].priv);
841 		queues->queue_array[i].priv = NULL;
842 	}
843 
844 	auxtrace_queues__free(queues);
845 }
846 
847 static void cs_etm__free(struct perf_session *session)
848 {
849 	int i;
850 	struct int_node *inode, *tmp;
851 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
852 						   struct cs_etm_auxtrace,
853 						   auxtrace);
854 	cs_etm__free_events(session);
855 	session->auxtrace = NULL;
856 
857 	/* First remove all traceID/metadata nodes for the RB tree */
858 	intlist__for_each_entry_safe(inode, tmp, traceid_list)
859 		intlist__remove(traceid_list, inode);
860 	/* Then the RB tree itself */
861 	intlist__delete(traceid_list);
862 
863 	for (i = 0; i < aux->num_cpu; i++)
864 		zfree(&aux->metadata[i]);
865 
866 	thread__zput(aux->unknown_thread);
867 	zfree(&aux->metadata);
868 	zfree(&aux);
869 }
870 
871 static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
872 				      struct evsel *evsel)
873 {
874 	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
875 						   struct cs_etm_auxtrace,
876 						   auxtrace);
877 
878 	return evsel->core.attr.type == aux->pmu_type;
879 }
880 
881 static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
882 {
883 	struct machine *machine;
884 
885 	machine = etmq->etm->machine;
886 
887 	if (address >= machine__kernel_start(machine)) {
888 		if (machine__is_host(machine))
889 			return PERF_RECORD_MISC_KERNEL;
890 		else
891 			return PERF_RECORD_MISC_GUEST_KERNEL;
892 	} else {
893 		if (machine__is_host(machine))
894 			return PERF_RECORD_MISC_USER;
895 		else if (perf_guest)
896 			return PERF_RECORD_MISC_GUEST_USER;
897 		else
898 			return PERF_RECORD_MISC_HYPERVISOR;
899 	}
900 }
901 
902 static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
903 			      u64 address, size_t size, u8 *buffer)
904 {
905 	u8  cpumode;
906 	u64 offset;
907 	int len;
908 	struct thread *thread;
909 	struct machine *machine;
910 	struct addr_location al;
911 	struct dso *dso;
912 	struct cs_etm_traceid_queue *tidq;
913 
914 	if (!etmq)
915 		return 0;
916 
917 	machine = etmq->etm->machine;
918 	cpumode = cs_etm__cpu_mode(etmq, address);
919 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
920 	if (!tidq)
921 		return 0;
922 
923 	thread = tidq->thread;
924 	if (!thread) {
925 		if (cpumode != PERF_RECORD_MISC_KERNEL)
926 			return 0;
927 		thread = etmq->etm->unknown_thread;
928 	}
929 
930 	if (!thread__find_map(thread, cpumode, address, &al))
931 		return 0;
932 
933 	dso = map__dso(al.map);
934 	if (!dso)
935 		return 0;
936 
937 	if (dso->data.status == DSO_DATA_STATUS_ERROR &&
938 	    dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
939 		return 0;
940 
941 	offset = map__map_ip(al.map, address);
942 
943 	map__load(al.map);
944 
945 	len = dso__data_read_offset(dso, machine, offset, buffer, size);
946 
947 	if (len <= 0) {
948 		ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
949 				 "              Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
950 		if (!dso->auxtrace_warned) {
951 			pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
952 				    address,
953 				    dso->long_name ? dso->long_name : "Unknown");
954 			dso->auxtrace_warned = true;
955 		}
956 		return 0;
957 	}
958 
959 	return len;
960 }
961 
962 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
963 						bool formatted)
964 {
965 	struct cs_etm_decoder_params d_params;
966 	struct cs_etm_trace_params  *t_params = NULL;
967 	struct cs_etm_queue *etmq;
968 	/*
969 	 * Each queue can only contain data from one CPU when unformatted, so only one decoder is
970 	 * needed.
971 	 */
972 	int decoders = formatted ? etm->num_cpu : 1;
973 
974 	etmq = zalloc(sizeof(*etmq));
975 	if (!etmq)
976 		return NULL;
977 
978 	etmq->traceid_queues_list = intlist__new(NULL);
979 	if (!etmq->traceid_queues_list)
980 		goto out_free;
981 
982 	/* Use metadata to fill in trace parameters for trace decoder */
983 	t_params = zalloc(sizeof(*t_params) * decoders);
984 
985 	if (!t_params)
986 		goto out_free;
987 
988 	if (cs_etm__init_trace_params(t_params, etm, decoders))
989 		goto out_free;
990 
991 	/* Set decoder parameters to decode trace packets */
992 	if (cs_etm__init_decoder_params(&d_params, etmq,
993 					dump_trace ? CS_ETM_OPERATION_PRINT :
994 						     CS_ETM_OPERATION_DECODE,
995 					formatted))
996 		goto out_free;
997 
998 	etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
999 					    t_params);
1000 
1001 	if (!etmq->decoder)
1002 		goto out_free;
1003 
1004 	/*
1005 	 * Register a function to handle all memory accesses required by
1006 	 * the trace decoder library.
1007 	 */
1008 	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
1009 					      0x0L, ((u64) -1L),
1010 					      cs_etm__mem_access))
1011 		goto out_free_decoder;
1012 
1013 	zfree(&t_params);
1014 	return etmq;
1015 
1016 out_free_decoder:
1017 	cs_etm_decoder__free(etmq->decoder);
1018 out_free:
1019 	intlist__delete(etmq->traceid_queues_list);
1020 	free(etmq);
1021 
1022 	return NULL;
1023 }
1024 
1025 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
1026 			       struct auxtrace_queue *queue,
1027 			       unsigned int queue_nr,
1028 			       bool formatted)
1029 {
1030 	struct cs_etm_queue *etmq = queue->priv;
1031 
1032 	if (list_empty(&queue->head) || etmq)
1033 		return 0;
1034 
1035 	etmq = cs_etm__alloc_queue(etm, formatted);
1036 
1037 	if (!etmq)
1038 		return -ENOMEM;
1039 
1040 	queue->priv = etmq;
1041 	etmq->etm = etm;
1042 	etmq->queue_nr = queue_nr;
1043 	etmq->offset = 0;
1044 
1045 	return 0;
1046 }
1047 
1048 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
1049 					    struct cs_etm_queue *etmq,
1050 					    unsigned int queue_nr)
1051 {
1052 	int ret = 0;
1053 	unsigned int cs_queue_nr;
1054 	u8 trace_chan_id;
1055 	u64 cs_timestamp;
1056 
1057 	/*
1058 	 * We are under a CPU-wide trace scenario.  As such we need to know
1059 	 * when the code that generated the traces started to execute so that
1060 	 * it can be correlated with execution on other CPUs.  So we get a
1061 	 * handle on the beginning of traces and decode until we find a
1062 	 * timestamp.  The timestamp is then added to the auxtrace min heap
1063 	 * in order to know what nibble (of all the etmqs) to decode first.
1064 	 */
1065 	while (1) {
1066 		/*
1067 		 * Fetch an aux_buffer from this etmq.  Bail if no more
1068 		 * blocks or an error has been encountered.
1069 		 */
1070 		ret = cs_etm__get_data_block(etmq);
1071 		if (ret <= 0)
1072 			goto out;
1073 
1074 		/*
1075 		 * Run decoder on the trace block.  The decoder will stop when
1076 		 * encountering a CS timestamp, a full packet queue or the end of
1077 		 * trace for that block.
1078 		 */
1079 		ret = cs_etm__decode_data_block(etmq);
1080 		if (ret)
1081 			goto out;
1082 
1083 		/*
1084 		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
1085 		 * the timestamp calculation for us.
1086 		 */
1087 		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
1088 
1089 		/* We found a timestamp, no need to continue. */
1090 		if (cs_timestamp)
1091 			break;
1092 
1093 		/*
1094 		 * We didn't find a timestamp so empty all the traceid packet
1095 		 * queues before looking for another timestamp packet, either
1096 		 * in the current data block or a new one.  Packets that were
1097 		 * just decoded are useless since no timestamp has been
1098 		 * associated with them.  As such simply discard them.
1099 		 */
1100 		cs_etm__clear_all_packet_queues(etmq);
1101 	}
1102 
1103 	/*
1104 	 * We have a timestamp.  Add it to the min heap to reflect when
1105 	 * instructions conveyed by the range packets of this traceID queue
1106 	 * started to execute.  Once the same has been done for all the traceID
1107 	 * queues of each etmq, redenring and decoding can start in
1108 	 * chronological order.
1109 	 *
1110 	 * Note that packets decoded above are still in the traceID's packet
1111 	 * queue and will be processed in cs_etm__process_timestamped_queues().
1112 	 */
1113 	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
1114 	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
1115 out:
1116 	return ret;
1117 }
1118 
1119 static inline
1120 void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
1121 				 struct cs_etm_traceid_queue *tidq)
1122 {
1123 	struct branch_stack *bs_src = tidq->last_branch_rb;
1124 	struct branch_stack *bs_dst = tidq->last_branch;
1125 	size_t nr = 0;
1126 
1127 	/*
1128 	 * Set the number of records before early exit: ->nr is used to
1129 	 * determine how many branches to copy from ->entries.
1130 	 */
1131 	bs_dst->nr = bs_src->nr;
1132 
1133 	/*
1134 	 * Early exit when there is nothing to copy.
1135 	 */
1136 	if (!bs_src->nr)
1137 		return;
1138 
1139 	/*
1140 	 * As bs_src->entries is a circular buffer, we need to copy from it in
1141 	 * two steps.  First, copy the branches from the most recently inserted
1142 	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
1143 	 */
1144 	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
1145 	memcpy(&bs_dst->entries[0],
1146 	       &bs_src->entries[tidq->last_branch_pos],
1147 	       sizeof(struct branch_entry) * nr);
1148 
1149 	/*
1150 	 * If we wrapped around at least once, the branches from the beginning
1151 	 * of the bs_src->entries buffer and until the ->last_branch_pos element
1152 	 * are older valid branches: copy them over.  The total number of
1153 	 * branches copied over will be equal to the number of branches asked by
1154 	 * the user in last_branch_sz.
1155 	 */
1156 	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
1157 		memcpy(&bs_dst->entries[nr],
1158 		       &bs_src->entries[0],
1159 		       sizeof(struct branch_entry) * tidq->last_branch_pos);
1160 	}
1161 }
1162 
1163 static inline
1164 void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
1165 {
1166 	tidq->last_branch_pos = 0;
1167 	tidq->last_branch_rb->nr = 0;
1168 }
1169 
1170 static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
1171 					 u8 trace_chan_id, u64 addr)
1172 {
1173 	u8 instrBytes[2];
1174 
1175 	cs_etm__mem_access(etmq, trace_chan_id, addr,
1176 			   ARRAY_SIZE(instrBytes), instrBytes);
1177 	/*
1178 	 * T32 instruction size is indicated by bits[15:11] of the first
1179 	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
1180 	 * denote a 32-bit instruction.
1181 	 */
1182 	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
1183 }
1184 
1185 static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
1186 {
1187 	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
1188 	if (packet->sample_type == CS_ETM_DISCONTINUITY)
1189 		return 0;
1190 
1191 	return packet->start_addr;
1192 }
1193 
1194 static inline
1195 u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
1196 {
1197 	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
1198 	if (packet->sample_type == CS_ETM_DISCONTINUITY)
1199 		return 0;
1200 
1201 	return packet->end_addr - packet->last_instr_size;
1202 }
1203 
1204 static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
1205 				     u64 trace_chan_id,
1206 				     const struct cs_etm_packet *packet,
1207 				     u64 offset)
1208 {
1209 	if (packet->isa == CS_ETM_ISA_T32) {
1210 		u64 addr = packet->start_addr;
1211 
1212 		while (offset) {
1213 			addr += cs_etm__t32_instr_size(etmq,
1214 						       trace_chan_id, addr);
1215 			offset--;
1216 		}
1217 		return addr;
1218 	}
1219 
1220 	/* Assume a 4 byte instruction size (A32/A64) */
1221 	return packet->start_addr + offset * 4;
1222 }
1223 
1224 static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
1225 					  struct cs_etm_traceid_queue *tidq)
1226 {
1227 	struct branch_stack *bs = tidq->last_branch_rb;
1228 	struct branch_entry *be;
1229 
1230 	/*
1231 	 * The branches are recorded in a circular buffer in reverse
1232 	 * chronological order: we start recording from the last element of the
1233 	 * buffer down.  After writing the first element of the stack, move the
1234 	 * insert position back to the end of the buffer.
1235 	 */
1236 	if (!tidq->last_branch_pos)
1237 		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1238 
1239 	tidq->last_branch_pos -= 1;
1240 
1241 	be       = &bs->entries[tidq->last_branch_pos];
1242 	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1243 	be->to	 = cs_etm__first_executed_instr(tidq->packet);
1244 	/* No support for mispredict */
1245 	be->flags.mispred = 0;
1246 	be->flags.predicted = 1;
1247 
1248 	/*
1249 	 * Increment bs->nr until reaching the number of last branches asked by
1250 	 * the user on the command line.
1251 	 */
1252 	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
1253 		bs->nr += 1;
1254 }
1255 
1256 static int cs_etm__inject_event(union perf_event *event,
1257 			       struct perf_sample *sample, u64 type)
1258 {
1259 	event->header.size = perf_event__sample_event_size(sample, type, 0);
1260 	return perf_event__synthesize_sample(event, type, 0, sample);
1261 }
1262 
1263 
1264 static int
1265 cs_etm__get_trace(struct cs_etm_queue *etmq)
1266 {
1267 	struct auxtrace_buffer *aux_buffer = etmq->buffer;
1268 	struct auxtrace_buffer *old_buffer = aux_buffer;
1269 	struct auxtrace_queue *queue;
1270 
1271 	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1272 
1273 	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
1274 
1275 	/* If no more data, drop the previous auxtrace_buffer and return */
1276 	if (!aux_buffer) {
1277 		if (old_buffer)
1278 			auxtrace_buffer__drop_data(old_buffer);
1279 		etmq->buf_len = 0;
1280 		return 0;
1281 	}
1282 
1283 	etmq->buffer = aux_buffer;
1284 
1285 	/* If the aux_buffer doesn't have data associated, try to load it */
1286 	if (!aux_buffer->data) {
1287 		/* get the file desc associated with the perf data file */
1288 		int fd = perf_data__fd(etmq->etm->session->data);
1289 
1290 		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
1291 		if (!aux_buffer->data)
1292 			return -ENOMEM;
1293 	}
1294 
1295 	/* If valid, drop the previous buffer */
1296 	if (old_buffer)
1297 		auxtrace_buffer__drop_data(old_buffer);
1298 
1299 	etmq->buf_used = 0;
1300 	etmq->buf_len = aux_buffer->size;
1301 	etmq->buf = aux_buffer->data;
1302 
1303 	return etmq->buf_len;
1304 }
1305 
1306 static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
1307 				    struct cs_etm_traceid_queue *tidq)
1308 {
1309 	if ((!tidq->thread) && (tidq->tid != -1))
1310 		tidq->thread = machine__find_thread(etm->machine, -1,
1311 						    tidq->tid);
1312 
1313 	if (tidq->thread)
1314 		tidq->pid = tidq->thread->pid_;
1315 }
1316 
1317 int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
1318 			 pid_t tid, u8 trace_chan_id)
1319 {
1320 	int cpu, err = -EINVAL;
1321 	struct cs_etm_auxtrace *etm = etmq->etm;
1322 	struct cs_etm_traceid_queue *tidq;
1323 
1324 	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1325 	if (!tidq)
1326 		return err;
1327 
1328 	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
1329 		return err;
1330 
1331 	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
1332 	if (err)
1333 		return err;
1334 
1335 	tidq->tid = tid;
1336 	thread__zput(tidq->thread);
1337 
1338 	cs_etm__set_pid_tid_cpu(etm, tidq);
1339 	return 0;
1340 }
1341 
1342 bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
1343 {
1344 	return !!etmq->etm->timeless_decoding;
1345 }
1346 
1347 static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
1348 			      u64 trace_chan_id,
1349 			      const struct cs_etm_packet *packet,
1350 			      struct perf_sample *sample)
1351 {
1352 	/*
1353 	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
1354 	 * packet, so directly bail out with 'insn_len' = 0.
1355 	 */
1356 	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
1357 		sample->insn_len = 0;
1358 		return;
1359 	}
1360 
1361 	/*
1362 	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
1363 	 * cs_etm__t32_instr_size().
1364 	 */
1365 	if (packet->isa == CS_ETM_ISA_T32)
1366 		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
1367 							  sample->ip);
1368 	/* Otherwise, A64 and A32 instruction size are always 32-bit. */
1369 	else
1370 		sample->insn_len = 4;
1371 
1372 	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
1373 			   sample->insn_len, (void *)sample->insn);
1374 }
1375 
1376 u64 cs_etm__convert_sample_time(struct cs_etm_queue *etmq, u64 cs_timestamp)
1377 {
1378 	struct cs_etm_auxtrace *etm = etmq->etm;
1379 
1380 	if (etm->has_virtual_ts)
1381 		return tsc_to_perf_time(cs_timestamp, &etm->tc);
1382 	else
1383 		return cs_timestamp;
1384 }
1385 
1386 static inline u64 cs_etm__resolve_sample_time(struct cs_etm_queue *etmq,
1387 					       struct cs_etm_traceid_queue *tidq)
1388 {
1389 	struct cs_etm_auxtrace *etm = etmq->etm;
1390 	struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;
1391 
1392 	if (!etm->timeless_decoding && etm->has_virtual_ts)
1393 		return packet_queue->cs_timestamp;
1394 	else
1395 		return etm->latest_kernel_timestamp;
1396 }
1397 
1398 static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
1399 					    struct cs_etm_traceid_queue *tidq,
1400 					    u64 addr, u64 period)
1401 {
1402 	int ret = 0;
1403 	struct cs_etm_auxtrace *etm = etmq->etm;
1404 	union perf_event *event = tidq->event_buf;
1405 	struct perf_sample sample = {.ip = 0,};
1406 
1407 	event->sample.header.type = PERF_RECORD_SAMPLE;
1408 	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
1409 	event->sample.header.size = sizeof(struct perf_event_header);
1410 
1411 	/* Set time field based on etm auxtrace config. */
1412 	sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1413 
1414 	sample.ip = addr;
1415 	sample.pid = tidq->pid;
1416 	sample.tid = tidq->tid;
1417 	sample.id = etmq->etm->instructions_id;
1418 	sample.stream_id = etmq->etm->instructions_id;
1419 	sample.period = period;
1420 	sample.cpu = tidq->packet->cpu;
1421 	sample.flags = tidq->prev_packet->flags;
1422 	sample.cpumode = event->sample.header.misc;
1423 
1424 	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1425 
1426 	if (etm->synth_opts.last_branch)
1427 		sample.branch_stack = tidq->last_branch;
1428 
1429 	if (etm->synth_opts.inject) {
1430 		ret = cs_etm__inject_event(event, &sample,
1431 					   etm->instructions_sample_type);
1432 		if (ret)
1433 			return ret;
1434 	}
1435 
1436 	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1437 
1438 	if (ret)
1439 		pr_err(
1440 			"CS ETM Trace: failed to deliver instruction event, error %d\n",
1441 			ret);
1442 
1443 	return ret;
1444 }
1445 
1446 /*
1447  * The cs etm packet encodes an instruction range between a branch target
1448  * and the next taken branch. Generate sample accordingly.
1449  */
1450 static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
1451 				       struct cs_etm_traceid_queue *tidq)
1452 {
1453 	int ret = 0;
1454 	struct cs_etm_auxtrace *etm = etmq->etm;
1455 	struct perf_sample sample = {.ip = 0,};
1456 	union perf_event *event = tidq->event_buf;
1457 	struct dummy_branch_stack {
1458 		u64			nr;
1459 		u64			hw_idx;
1460 		struct branch_entry	entries;
1461 	} dummy_bs;
1462 	u64 ip;
1463 
1464 	ip = cs_etm__last_executed_instr(tidq->prev_packet);
1465 
1466 	event->sample.header.type = PERF_RECORD_SAMPLE;
1467 	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
1468 	event->sample.header.size = sizeof(struct perf_event_header);
1469 
1470 	/* Set time field based on etm auxtrace config. */
1471 	sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1472 
1473 	sample.ip = ip;
1474 	sample.pid = tidq->pid;
1475 	sample.tid = tidq->tid;
1476 	sample.addr = cs_etm__first_executed_instr(tidq->packet);
1477 	sample.id = etmq->etm->branches_id;
1478 	sample.stream_id = etmq->etm->branches_id;
1479 	sample.period = 1;
1480 	sample.cpu = tidq->packet->cpu;
1481 	sample.flags = tidq->prev_packet->flags;
1482 	sample.cpumode = event->sample.header.misc;
1483 
1484 	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1485 			  &sample);
1486 
1487 	/*
1488 	 * perf report cannot handle events without a branch stack
1489 	 */
1490 	if (etm->synth_opts.last_branch) {
1491 		dummy_bs = (struct dummy_branch_stack){
1492 			.nr = 1,
1493 			.hw_idx = -1ULL,
1494 			.entries = {
1495 				.from = sample.ip,
1496 				.to = sample.addr,
1497 			},
1498 		};
1499 		sample.branch_stack = (struct branch_stack *)&dummy_bs;
1500 	}
1501 
1502 	if (etm->synth_opts.inject) {
1503 		ret = cs_etm__inject_event(event, &sample,
1504 					   etm->branches_sample_type);
1505 		if (ret)
1506 			return ret;
1507 	}
1508 
1509 	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1510 
1511 	if (ret)
1512 		pr_err(
1513 		"CS ETM Trace: failed to deliver instruction event, error %d\n",
1514 		ret);
1515 
1516 	return ret;
1517 }
1518 
1519 struct cs_etm_synth {
1520 	struct perf_tool dummy_tool;
1521 	struct perf_session *session;
1522 };
1523 
1524 static int cs_etm__event_synth(struct perf_tool *tool,
1525 			       union perf_event *event,
1526 			       struct perf_sample *sample __maybe_unused,
1527 			       struct machine *machine __maybe_unused)
1528 {
1529 	struct cs_etm_synth *cs_etm_synth =
1530 		      container_of(tool, struct cs_etm_synth, dummy_tool);
1531 
1532 	return perf_session__deliver_synth_event(cs_etm_synth->session,
1533 						 event, NULL);
1534 }
1535 
1536 static int cs_etm__synth_event(struct perf_session *session,
1537 			       struct perf_event_attr *attr, u64 id)
1538 {
1539 	struct cs_etm_synth cs_etm_synth;
1540 
1541 	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
1542 	cs_etm_synth.session = session;
1543 
1544 	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
1545 					   &id, cs_etm__event_synth);
1546 }
1547 
1548 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1549 				struct perf_session *session)
1550 {
1551 	struct evlist *evlist = session->evlist;
1552 	struct evsel *evsel;
1553 	struct perf_event_attr attr;
1554 	bool found = false;
1555 	u64 id;
1556 	int err;
1557 
1558 	evlist__for_each_entry(evlist, evsel) {
1559 		if (evsel->core.attr.type == etm->pmu_type) {
1560 			found = true;
1561 			break;
1562 		}
1563 	}
1564 
1565 	if (!found) {
1566 		pr_debug("No selected events with CoreSight Trace data\n");
1567 		return 0;
1568 	}
1569 
1570 	memset(&attr, 0, sizeof(struct perf_event_attr));
1571 	attr.size = sizeof(struct perf_event_attr);
1572 	attr.type = PERF_TYPE_HARDWARE;
1573 	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
1574 	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1575 			    PERF_SAMPLE_PERIOD;
1576 	if (etm->timeless_decoding)
1577 		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1578 	else
1579 		attr.sample_type |= PERF_SAMPLE_TIME;
1580 
1581 	attr.exclude_user = evsel->core.attr.exclude_user;
1582 	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1583 	attr.exclude_hv = evsel->core.attr.exclude_hv;
1584 	attr.exclude_host = evsel->core.attr.exclude_host;
1585 	attr.exclude_guest = evsel->core.attr.exclude_guest;
1586 	attr.sample_id_all = evsel->core.attr.sample_id_all;
1587 	attr.read_format = evsel->core.attr.read_format;
1588 
1589 	/* create new id val to be a fixed offset from evsel id */
1590 	id = evsel->core.id[0] + 1000000000;
1591 
1592 	if (!id)
1593 		id = 1;
1594 
1595 	if (etm->synth_opts.branches) {
1596 		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
1597 		attr.sample_period = 1;
1598 		attr.sample_type |= PERF_SAMPLE_ADDR;
1599 		err = cs_etm__synth_event(session, &attr, id);
1600 		if (err)
1601 			return err;
1602 		etm->branches_sample_type = attr.sample_type;
1603 		etm->branches_id = id;
1604 		id += 1;
1605 		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
1606 	}
1607 
1608 	if (etm->synth_opts.last_branch) {
1609 		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1610 		/*
1611 		 * We don't use the hardware index, but the sample generation
1612 		 * code uses the new format branch_stack with this field,
1613 		 * so the event attributes must indicate that it's present.
1614 		 */
1615 		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
1616 	}
1617 
1618 	if (etm->synth_opts.instructions) {
1619 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1620 		attr.sample_period = etm->synth_opts.period;
1621 		etm->instructions_sample_period = attr.sample_period;
1622 		err = cs_etm__synth_event(session, &attr, id);
1623 		if (err)
1624 			return err;
1625 		etm->instructions_sample_type = attr.sample_type;
1626 		etm->instructions_id = id;
1627 		id += 1;
1628 	}
1629 
1630 	return 0;
1631 }
1632 
1633 static int cs_etm__sample(struct cs_etm_queue *etmq,
1634 			  struct cs_etm_traceid_queue *tidq)
1635 {
1636 	struct cs_etm_auxtrace *etm = etmq->etm;
1637 	int ret;
1638 	u8 trace_chan_id = tidq->trace_chan_id;
1639 	u64 instrs_prev;
1640 
1641 	/* Get instructions remainder from previous packet */
1642 	instrs_prev = tidq->period_instructions;
1643 
1644 	tidq->period_instructions += tidq->packet->instr_count;
1645 
1646 	/*
1647 	 * Record a branch when the last instruction in
1648 	 * PREV_PACKET is a branch.
1649 	 */
1650 	if (etm->synth_opts.last_branch &&
1651 	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1652 	    tidq->prev_packet->last_instr_taken_branch)
1653 		cs_etm__update_last_branch_rb(etmq, tidq);
1654 
1655 	if (etm->synth_opts.instructions &&
1656 	    tidq->period_instructions >= etm->instructions_sample_period) {
1657 		/*
1658 		 * Emit instruction sample periodically
1659 		 * TODO: allow period to be defined in cycles and clock time
1660 		 */
1661 
1662 		/*
1663 		 * Below diagram demonstrates the instruction samples
1664 		 * generation flows:
1665 		 *
1666 		 *    Instrs     Instrs       Instrs       Instrs
1667 		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
1668 		 *    |            |            |            |
1669 		 *    V            V            V            V
1670 		 *   --------------------------------------------------
1671 		 *            ^                                  ^
1672 		 *            |                                  |
1673 		 *         Period                             Period
1674 		 *    instructions(Pi)                   instructions(Pi')
1675 		 *
1676 		 *            |                                  |
1677 		 *            \---------------- -----------------/
1678 		 *                             V
1679 		 *                 tidq->packet->instr_count
1680 		 *
1681 		 * Instrs Sample(n...) are the synthesised samples occurring
1682 		 * every etm->instructions_sample_period instructions - as
1683 		 * defined on the perf command line.  Sample(n) is being the
1684 		 * last sample before the current etm packet, n+1 to n+3
1685 		 * samples are generated from the current etm packet.
1686 		 *
1687 		 * tidq->packet->instr_count represents the number of
1688 		 * instructions in the current etm packet.
1689 		 *
1690 		 * Period instructions (Pi) contains the number of
1691 		 * instructions executed after the sample point(n) from the
1692 		 * previous etm packet.  This will always be less than
1693 		 * etm->instructions_sample_period.
1694 		 *
1695 		 * When generate new samples, it combines with two parts
1696 		 * instructions, one is the tail of the old packet and another
1697 		 * is the head of the new coming packet, to generate
1698 		 * sample(n+1); sample(n+2) and sample(n+3) consume the
1699 		 * instructions with sample period.  After sample(n+3), the rest
1700 		 * instructions will be used by later packet and it is assigned
1701 		 * to tidq->period_instructions for next round calculation.
1702 		 */
1703 
1704 		/*
1705 		 * Get the initial offset into the current packet instructions;
1706 		 * entry conditions ensure that instrs_prev is less than
1707 		 * etm->instructions_sample_period.
1708 		 */
1709 		u64 offset = etm->instructions_sample_period - instrs_prev;
1710 		u64 addr;
1711 
1712 		/* Prepare last branches for instruction sample */
1713 		if (etm->synth_opts.last_branch)
1714 			cs_etm__copy_last_branch_rb(etmq, tidq);
1715 
1716 		while (tidq->period_instructions >=
1717 				etm->instructions_sample_period) {
1718 			/*
1719 			 * Calculate the address of the sampled instruction (-1
1720 			 * as sample is reported as though instruction has just
1721 			 * been executed, but PC has not advanced to next
1722 			 * instruction)
1723 			 */
1724 			addr = cs_etm__instr_addr(etmq, trace_chan_id,
1725 						  tidq->packet, offset - 1);
1726 			ret = cs_etm__synth_instruction_sample(
1727 				etmq, tidq, addr,
1728 				etm->instructions_sample_period);
1729 			if (ret)
1730 				return ret;
1731 
1732 			offset += etm->instructions_sample_period;
1733 			tidq->period_instructions -=
1734 				etm->instructions_sample_period;
1735 		}
1736 	}
1737 
1738 	if (etm->synth_opts.branches) {
1739 		bool generate_sample = false;
1740 
1741 		/* Generate sample for tracing on packet */
1742 		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1743 			generate_sample = true;
1744 
1745 		/* Generate sample for branch taken packet */
1746 		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1747 		    tidq->prev_packet->last_instr_taken_branch)
1748 			generate_sample = true;
1749 
1750 		if (generate_sample) {
1751 			ret = cs_etm__synth_branch_sample(etmq, tidq);
1752 			if (ret)
1753 				return ret;
1754 		}
1755 	}
1756 
1757 	cs_etm__packet_swap(etm, tidq);
1758 
1759 	return 0;
1760 }
1761 
1762 static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1763 {
1764 	/*
1765 	 * When the exception packet is inserted, whether the last instruction
1766 	 * in previous range packet is taken branch or not, we need to force
1767 	 * to set 'prev_packet->last_instr_taken_branch' to true.  This ensures
1768 	 * to generate branch sample for the instruction range before the
1769 	 * exception is trapped to kernel or before the exception returning.
1770 	 *
1771 	 * The exception packet includes the dummy address values, so don't
1772 	 * swap PACKET with PREV_PACKET.  This keeps PREV_PACKET to be useful
1773 	 * for generating instruction and branch samples.
1774 	 */
1775 	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1776 		tidq->prev_packet->last_instr_taken_branch = true;
1777 
1778 	return 0;
1779 }
1780 
1781 static int cs_etm__flush(struct cs_etm_queue *etmq,
1782 			 struct cs_etm_traceid_queue *tidq)
1783 {
1784 	int err = 0;
1785 	struct cs_etm_auxtrace *etm = etmq->etm;
1786 
1787 	/* Handle start tracing packet */
1788 	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1789 		goto swap_packet;
1790 
1791 	if (etmq->etm->synth_opts.last_branch &&
1792 	    etmq->etm->synth_opts.instructions &&
1793 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1794 		u64 addr;
1795 
1796 		/* Prepare last branches for instruction sample */
1797 		cs_etm__copy_last_branch_rb(etmq, tidq);
1798 
1799 		/*
1800 		 * Generate a last branch event for the branches left in the
1801 		 * circular buffer at the end of the trace.
1802 		 *
1803 		 * Use the address of the end of the last reported execution
1804 		 * range
1805 		 */
1806 		addr = cs_etm__last_executed_instr(tidq->prev_packet);
1807 
1808 		err = cs_etm__synth_instruction_sample(
1809 			etmq, tidq, addr,
1810 			tidq->period_instructions);
1811 		if (err)
1812 			return err;
1813 
1814 		tidq->period_instructions = 0;
1815 
1816 	}
1817 
1818 	if (etm->synth_opts.branches &&
1819 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1820 		err = cs_etm__synth_branch_sample(etmq, tidq);
1821 		if (err)
1822 			return err;
1823 	}
1824 
1825 swap_packet:
1826 	cs_etm__packet_swap(etm, tidq);
1827 
1828 	/* Reset last branches after flush the trace */
1829 	if (etm->synth_opts.last_branch)
1830 		cs_etm__reset_last_branch_rb(tidq);
1831 
1832 	return err;
1833 }
1834 
1835 static int cs_etm__end_block(struct cs_etm_queue *etmq,
1836 			     struct cs_etm_traceid_queue *tidq)
1837 {
1838 	int err;
1839 
1840 	/*
1841 	 * It has no new packet coming and 'etmq->packet' contains the stale
1842 	 * packet which was set at the previous time with packets swapping;
1843 	 * so skip to generate branch sample to avoid stale packet.
1844 	 *
1845 	 * For this case only flush branch stack and generate a last branch
1846 	 * event for the branches left in the circular buffer at the end of
1847 	 * the trace.
1848 	 */
1849 	if (etmq->etm->synth_opts.last_branch &&
1850 	    etmq->etm->synth_opts.instructions &&
1851 	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1852 		u64 addr;
1853 
1854 		/* Prepare last branches for instruction sample */
1855 		cs_etm__copy_last_branch_rb(etmq, tidq);
1856 
1857 		/*
1858 		 * Use the address of the end of the last reported execution
1859 		 * range.
1860 		 */
1861 		addr = cs_etm__last_executed_instr(tidq->prev_packet);
1862 
1863 		err = cs_etm__synth_instruction_sample(
1864 			etmq, tidq, addr,
1865 			tidq->period_instructions);
1866 		if (err)
1867 			return err;
1868 
1869 		tidq->period_instructions = 0;
1870 	}
1871 
1872 	return 0;
1873 }
1874 /*
1875  * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1876  *			   if need be.
1877  * Returns:	< 0	if error
1878  *		= 0	if no more auxtrace_buffer to read
1879  *		> 0	if the current buffer isn't empty yet
1880  */
1881 static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1882 {
1883 	int ret;
1884 
1885 	if (!etmq->buf_len) {
1886 		ret = cs_etm__get_trace(etmq);
1887 		if (ret <= 0)
1888 			return ret;
1889 		/*
1890 		 * We cannot assume consecutive blocks in the data file
1891 		 * are contiguous, reset the decoder to force re-sync.
1892 		 */
1893 		ret = cs_etm_decoder__reset(etmq->decoder);
1894 		if (ret)
1895 			return ret;
1896 	}
1897 
1898 	return etmq->buf_len;
1899 }
1900 
1901 static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
1902 				 struct cs_etm_packet *packet,
1903 				 u64 end_addr)
1904 {
1905 	/* Initialise to keep compiler happy */
1906 	u16 instr16 = 0;
1907 	u32 instr32 = 0;
1908 	u64 addr;
1909 
1910 	switch (packet->isa) {
1911 	case CS_ETM_ISA_T32:
1912 		/*
1913 		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1914 		 *
1915 		 *  b'15         b'8
1916 		 * +-----------------+--------+
1917 		 * | 1 1 0 1 1 1 1 1 |  imm8  |
1918 		 * +-----------------+--------+
1919 		 *
1920 		 * According to the specification, it only defines SVC for T32
1921 		 * with 16 bits instruction and has no definition for 32bits;
1922 		 * so below only read 2 bytes as instruction size for T32.
1923 		 */
1924 		addr = end_addr - 2;
1925 		cs_etm__mem_access(etmq, trace_chan_id, addr,
1926 				   sizeof(instr16), (u8 *)&instr16);
1927 		if ((instr16 & 0xFF00) == 0xDF00)
1928 			return true;
1929 
1930 		break;
1931 	case CS_ETM_ISA_A32:
1932 		/*
1933 		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1934 		 *
1935 		 *  b'31 b'28 b'27 b'24
1936 		 * +---------+---------+-------------------------+
1937 		 * |  !1111  | 1 1 1 1 |        imm24            |
1938 		 * +---------+---------+-------------------------+
1939 		 */
1940 		addr = end_addr - 4;
1941 		cs_etm__mem_access(etmq, trace_chan_id, addr,
1942 				   sizeof(instr32), (u8 *)&instr32);
1943 		if ((instr32 & 0x0F000000) == 0x0F000000 &&
1944 		    (instr32 & 0xF0000000) != 0xF0000000)
1945 			return true;
1946 
1947 		break;
1948 	case CS_ETM_ISA_A64:
1949 		/*
1950 		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1951 		 *
1952 		 *  b'31               b'21           b'4     b'0
1953 		 * +-----------------------+---------+-----------+
1954 		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
1955 		 * +-----------------------+---------+-----------+
1956 		 */
1957 		addr = end_addr - 4;
1958 		cs_etm__mem_access(etmq, trace_chan_id, addr,
1959 				   sizeof(instr32), (u8 *)&instr32);
1960 		if ((instr32 & 0xFFE0001F) == 0xd4000001)
1961 			return true;
1962 
1963 		break;
1964 	case CS_ETM_ISA_UNKNOWN:
1965 	default:
1966 		break;
1967 	}
1968 
1969 	return false;
1970 }
1971 
1972 static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1973 			       struct cs_etm_traceid_queue *tidq, u64 magic)
1974 {
1975 	u8 trace_chan_id = tidq->trace_chan_id;
1976 	struct cs_etm_packet *packet = tidq->packet;
1977 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
1978 
1979 	if (magic == __perf_cs_etmv3_magic)
1980 		if (packet->exception_number == CS_ETMV3_EXC_SVC)
1981 			return true;
1982 
1983 	/*
1984 	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
1985 	 * HVC cases; need to check if it's SVC instruction based on
1986 	 * packet address.
1987 	 */
1988 	if (magic == __perf_cs_etmv4_magic) {
1989 		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1990 		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1991 					 prev_packet->end_addr))
1992 			return true;
1993 	}
1994 
1995 	return false;
1996 }
1997 
1998 static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1999 				       u64 magic)
2000 {
2001 	struct cs_etm_packet *packet = tidq->packet;
2002 
2003 	if (magic == __perf_cs_etmv3_magic)
2004 		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
2005 		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
2006 		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
2007 		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
2008 		    packet->exception_number == CS_ETMV3_EXC_FIQ)
2009 			return true;
2010 
2011 	if (magic == __perf_cs_etmv4_magic)
2012 		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
2013 		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
2014 		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
2015 		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
2016 		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
2017 		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
2018 		    packet->exception_number == CS_ETMV4_EXC_FIQ)
2019 			return true;
2020 
2021 	return false;
2022 }
2023 
2024 static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
2025 				      struct cs_etm_traceid_queue *tidq,
2026 				      u64 magic)
2027 {
2028 	u8 trace_chan_id = tidq->trace_chan_id;
2029 	struct cs_etm_packet *packet = tidq->packet;
2030 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
2031 
2032 	if (magic == __perf_cs_etmv3_magic)
2033 		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
2034 		    packet->exception_number == CS_ETMV3_EXC_HYP ||
2035 		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
2036 		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
2037 		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
2038 		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
2039 		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
2040 			return true;
2041 
2042 	if (magic == __perf_cs_etmv4_magic) {
2043 		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
2044 		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
2045 		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
2046 		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
2047 			return true;
2048 
2049 		/*
2050 		 * For CS_ETMV4_EXC_CALL, except SVC other instructions
2051 		 * (SMC, HVC) are taken as sync exceptions.
2052 		 */
2053 		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
2054 		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
2055 					  prev_packet->end_addr))
2056 			return true;
2057 
2058 		/*
2059 		 * ETMv4 has 5 bits for exception number; if the numbers
2060 		 * are in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
2061 		 * they are implementation defined exceptions.
2062 		 *
2063 		 * For this case, simply take it as sync exception.
2064 		 */
2065 		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
2066 		    packet->exception_number <= CS_ETMV4_EXC_END)
2067 			return true;
2068 	}
2069 
2070 	return false;
2071 }
2072 
2073 static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
2074 				    struct cs_etm_traceid_queue *tidq)
2075 {
2076 	struct cs_etm_packet *packet = tidq->packet;
2077 	struct cs_etm_packet *prev_packet = tidq->prev_packet;
2078 	u8 trace_chan_id = tidq->trace_chan_id;
2079 	u64 magic;
2080 	int ret;
2081 
2082 	switch (packet->sample_type) {
2083 	case CS_ETM_RANGE:
2084 		/*
2085 		 * Immediate branch instruction without neither link nor
2086 		 * return flag, it's normal branch instruction within
2087 		 * the function.
2088 		 */
2089 		if (packet->last_instr_type == OCSD_INSTR_BR &&
2090 		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
2091 			packet->flags = PERF_IP_FLAG_BRANCH;
2092 
2093 			if (packet->last_instr_cond)
2094 				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
2095 		}
2096 
2097 		/*
2098 		 * Immediate branch instruction with link (e.g. BL), this is
2099 		 * branch instruction for function call.
2100 		 */
2101 		if (packet->last_instr_type == OCSD_INSTR_BR &&
2102 		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
2103 			packet->flags = PERF_IP_FLAG_BRANCH |
2104 					PERF_IP_FLAG_CALL;
2105 
2106 		/*
2107 		 * Indirect branch instruction with link (e.g. BLR), this is
2108 		 * branch instruction for function call.
2109 		 */
2110 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2111 		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
2112 			packet->flags = PERF_IP_FLAG_BRANCH |
2113 					PERF_IP_FLAG_CALL;
2114 
2115 		/*
2116 		 * Indirect branch instruction with subtype of
2117 		 * OCSD_S_INSTR_V7_IMPLIED_RET, this is explicit hint for
2118 		 * function return for A32/T32.
2119 		 */
2120 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2121 		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
2122 			packet->flags = PERF_IP_FLAG_BRANCH |
2123 					PERF_IP_FLAG_RETURN;
2124 
2125 		/*
2126 		 * Indirect branch instruction without link (e.g. BR), usually
2127 		 * this is used for function return, especially for functions
2128 		 * within dynamic link lib.
2129 		 */
2130 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2131 		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
2132 			packet->flags = PERF_IP_FLAG_BRANCH |
2133 					PERF_IP_FLAG_RETURN;
2134 
2135 		/* Return instruction for function return. */
2136 		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2137 		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
2138 			packet->flags = PERF_IP_FLAG_BRANCH |
2139 					PERF_IP_FLAG_RETURN;
2140 
2141 		/*
2142 		 * Decoder might insert a discontinuity in the middle of
2143 		 * instruction packets, fixup prev_packet with flag
2144 		 * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting trace.
2145 		 */
2146 		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
2147 			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
2148 					      PERF_IP_FLAG_TRACE_BEGIN;
2149 
2150 		/*
2151 		 * If the previous packet is an exception return packet
2152 		 * and the return address just follows SVC instruction,
2153 		 * it needs to calibrate the previous packet sample flags
2154 		 * as PERF_IP_FLAG_SYSCALLRET.
2155 		 */
2156 		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
2157 					   PERF_IP_FLAG_RETURN |
2158 					   PERF_IP_FLAG_INTERRUPT) &&
2159 		    cs_etm__is_svc_instr(etmq, trace_chan_id,
2160 					 packet, packet->start_addr))
2161 			prev_packet->flags = PERF_IP_FLAG_BRANCH |
2162 					     PERF_IP_FLAG_RETURN |
2163 					     PERF_IP_FLAG_SYSCALLRET;
2164 		break;
2165 	case CS_ETM_DISCONTINUITY:
2166 		/*
2167 		 * The trace is discontinuous, if the previous packet is
2168 		 * instruction packet, set flag PERF_IP_FLAG_TRACE_END
2169 		 * for previous packet.
2170 		 */
2171 		if (prev_packet->sample_type == CS_ETM_RANGE)
2172 			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
2173 					      PERF_IP_FLAG_TRACE_END;
2174 		break;
2175 	case CS_ETM_EXCEPTION:
2176 		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
2177 		if (ret)
2178 			return ret;
2179 
2180 		/* The exception is for system call. */
2181 		if (cs_etm__is_syscall(etmq, tidq, magic))
2182 			packet->flags = PERF_IP_FLAG_BRANCH |
2183 					PERF_IP_FLAG_CALL |
2184 					PERF_IP_FLAG_SYSCALLRET;
2185 		/*
2186 		 * The exceptions are triggered by external signals from bus,
2187 		 * interrupt controller, debug module, PE reset or halt.
2188 		 */
2189 		else if (cs_etm__is_async_exception(tidq, magic))
2190 			packet->flags = PERF_IP_FLAG_BRANCH |
2191 					PERF_IP_FLAG_CALL |
2192 					PERF_IP_FLAG_ASYNC |
2193 					PERF_IP_FLAG_INTERRUPT;
2194 		/*
2195 		 * Otherwise, exception is caused by trap, instruction &
2196 		 * data fault, or alignment errors.
2197 		 */
2198 		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
2199 			packet->flags = PERF_IP_FLAG_BRANCH |
2200 					PERF_IP_FLAG_CALL |
2201 					PERF_IP_FLAG_INTERRUPT;
2202 
2203 		/*
2204 		 * When the exception packet is inserted, since exception
2205 		 * packet is not used standalone for generating samples
2206 		 * and it's affiliation to the previous instruction range
2207 		 * packet; so set previous range packet flags to tell perf
2208 		 * it is an exception taken branch.
2209 		 */
2210 		if (prev_packet->sample_type == CS_ETM_RANGE)
2211 			prev_packet->flags = packet->flags;
2212 		break;
2213 	case CS_ETM_EXCEPTION_RET:
2214 		/*
2215 		 * When the exception return packet is inserted, since
2216 		 * exception return packet is not used standalone for
2217 		 * generating samples and it's affiliation to the previous
2218 		 * instruction range packet; so set previous range packet
2219 		 * flags to tell perf it is an exception return branch.
2220 		 *
2221 		 * The exception return can be for either system call or
2222 		 * other exception types; unfortunately the packet doesn't
2223 		 * contain exception type related info so we cannot decide
2224 		 * the exception type purely based on exception return packet.
2225 		 * If we record the exception number from exception packet and
2226 		 * reuse it for exception return packet, this is not reliable
2227 		 * due the trace can be discontinuity or the interrupt can
2228 		 * be nested, thus the recorded exception number cannot be
2229 		 * used for exception return packet for these two cases.
2230 		 *
2231 		 * For exception return packet, we only need to distinguish the
2232 		 * packet is for system call or for other types.  Thus the
2233 		 * decision can be deferred when receive the next packet which
2234 		 * contains the return address, based on the return address we
2235 		 * can read out the previous instruction and check if it's a
2236 		 * system call instruction and then calibrate the sample flag
2237 		 * as needed.
2238 		 */
2239 		if (prev_packet->sample_type == CS_ETM_RANGE)
2240 			prev_packet->flags = PERF_IP_FLAG_BRANCH |
2241 					     PERF_IP_FLAG_RETURN |
2242 					     PERF_IP_FLAG_INTERRUPT;
2243 		break;
2244 	case CS_ETM_EMPTY:
2245 	default:
2246 		break;
2247 	}
2248 
2249 	return 0;
2250 }
2251 
2252 static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2253 {
2254 	int ret = 0;
2255 	size_t processed = 0;
2256 
2257 	/*
2258 	 * Packets are decoded and added to the decoder's packet queue
2259 	 * until the decoder packet processing callback has requested that
2260 	 * processing stops or there is nothing left in the buffer.  Normal
2261 	 * operations that stop processing are a timestamp packet or a full
2262 	 * decoder buffer queue.
2263 	 */
2264 	ret = cs_etm_decoder__process_data_block(etmq->decoder,
2265 						 etmq->offset,
2266 						 &etmq->buf[etmq->buf_used],
2267 						 etmq->buf_len,
2268 						 &processed);
2269 	if (ret)
2270 		goto out;
2271 
2272 	etmq->offset += processed;
2273 	etmq->buf_used += processed;
2274 	etmq->buf_len -= processed;
2275 
2276 out:
2277 	return ret;
2278 }
2279 
2280 static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2281 					 struct cs_etm_traceid_queue *tidq)
2282 {
2283 	int ret;
2284 	struct cs_etm_packet_queue *packet_queue;
2285 
2286 	packet_queue = &tidq->packet_queue;
2287 
2288 	/* Process each packet in this chunk */
2289 	while (1) {
2290 		ret = cs_etm_decoder__get_packet(packet_queue,
2291 						 tidq->packet);
2292 		if (ret <= 0)
2293 			/*
2294 			 * Stop processing this chunk on
2295 			 * end of data or error
2296 			 */
2297 			break;
2298 
2299 		/*
2300 		 * Since packet addresses are swapped in packet
2301 		 * handling within below switch() statements,
2302 		 * thus setting sample flags must be called
2303 		 * prior to switch() statement to use address
2304 		 * information before packets swapping.
2305 		 */
2306 		ret = cs_etm__set_sample_flags(etmq, tidq);
2307 		if (ret < 0)
2308 			break;
2309 
2310 		switch (tidq->packet->sample_type) {
2311 		case CS_ETM_RANGE:
2312 			/*
2313 			 * If the packet contains an instruction
2314 			 * range, generate instruction sequence
2315 			 * events.
2316 			 */
2317 			cs_etm__sample(etmq, tidq);
2318 			break;
2319 		case CS_ETM_EXCEPTION:
2320 		case CS_ETM_EXCEPTION_RET:
2321 			/*
2322 			 * If the exception packet is coming,
2323 			 * make sure the previous instruction
2324 			 * range packet to be handled properly.
2325 			 */
2326 			cs_etm__exception(tidq);
2327 			break;
2328 		case CS_ETM_DISCONTINUITY:
2329 			/*
2330 			 * Discontinuity in trace, flush
2331 			 * previous branch stack
2332 			 */
2333 			cs_etm__flush(etmq, tidq);
2334 			break;
2335 		case CS_ETM_EMPTY:
2336 			/*
2337 			 * Should not receive empty packet,
2338 			 * report error.
2339 			 */
2340 			pr_err("CS ETM Trace: empty packet\n");
2341 			return -EINVAL;
2342 		default:
2343 			break;
2344 		}
2345 	}
2346 
2347 	return ret;
2348 }
2349 
2350 static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2351 {
2352 	int idx;
2353 	struct int_node *inode;
2354 	struct cs_etm_traceid_queue *tidq;
2355 	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2356 
2357 	intlist__for_each_entry(inode, traceid_queues_list) {
2358 		idx = (int)(intptr_t)inode->priv;
2359 		tidq = etmq->traceid_queues[idx];
2360 
2361 		/* Ignore return value */
2362 		cs_etm__process_traceid_queue(etmq, tidq);
2363 
2364 		/*
2365 		 * Generate an instruction sample with the remaining
2366 		 * branchstack entries.
2367 		 */
2368 		cs_etm__flush(etmq, tidq);
2369 	}
2370 }
2371 
2372 static int cs_etm__run_per_thread_timeless_decoder(struct cs_etm_queue *etmq)
2373 {
2374 	int err = 0;
2375 	struct cs_etm_traceid_queue *tidq;
2376 
2377 	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2378 	if (!tidq)
2379 		return -EINVAL;
2380 
2381 	/* Go through each buffer in the queue and decode them one by one */
2382 	while (1) {
2383 		err = cs_etm__get_data_block(etmq);
2384 		if (err <= 0)
2385 			return err;
2386 
2387 		/* Run trace decoder until buffer consumed or end of trace */
2388 		do {
2389 			err = cs_etm__decode_data_block(etmq);
2390 			if (err)
2391 				return err;
2392 
2393 			/*
2394 			 * Process each packet in this chunk, nothing to do if
2395 			 * an error occurs other than hoping the next one will
2396 			 * be better.
2397 			 */
2398 			err = cs_etm__process_traceid_queue(etmq, tidq);
2399 
2400 		} while (etmq->buf_len);
2401 
2402 		if (err == 0)
2403 			/* Flush any remaining branch stack entries */
2404 			err = cs_etm__end_block(etmq, tidq);
2405 	}
2406 
2407 	return err;
2408 }
2409 
2410 static int cs_etm__run_per_cpu_timeless_decoder(struct cs_etm_queue *etmq)
2411 {
2412 	int idx, err = 0;
2413 	struct cs_etm_traceid_queue *tidq;
2414 	struct int_node *inode;
2415 
2416 	/* Go through each buffer in the queue and decode them one by one */
2417 	while (1) {
2418 		err = cs_etm__get_data_block(etmq);
2419 		if (err <= 0)
2420 			return err;
2421 
2422 		/* Run trace decoder until buffer consumed or end of trace */
2423 		do {
2424 			err = cs_etm__decode_data_block(etmq);
2425 			if (err)
2426 				return err;
2427 
2428 			/*
2429 			 * cs_etm__run_per_thread_timeless_decoder() runs on a
2430 			 * single traceID queue because each TID has a separate
2431 			 * buffer. But here in per-cpu mode we need to iterate
2432 			 * over each channel instead.
2433 			 */
2434 			intlist__for_each_entry(inode,
2435 						etmq->traceid_queues_list) {
2436 				idx = (int)(intptr_t)inode->priv;
2437 				tidq = etmq->traceid_queues[idx];
2438 				cs_etm__process_traceid_queue(etmq, tidq);
2439 			}
2440 		} while (etmq->buf_len);
2441 
2442 		intlist__for_each_entry(inode, etmq->traceid_queues_list) {
2443 			idx = (int)(intptr_t)inode->priv;
2444 			tidq = etmq->traceid_queues[idx];
2445 			/* Flush any remaining branch stack entries */
2446 			err = cs_etm__end_block(etmq, tidq);
2447 			if (err)
2448 				return err;
2449 		}
2450 	}
2451 
2452 	return err;
2453 }
2454 
2455 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2456 					   pid_t tid)
2457 {
2458 	unsigned int i;
2459 	struct auxtrace_queues *queues = &etm->queues;
2460 
2461 	for (i = 0; i < queues->nr_queues; i++) {
2462 		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2463 		struct cs_etm_queue *etmq = queue->priv;
2464 		struct cs_etm_traceid_queue *tidq;
2465 
2466 		if (!etmq)
2467 			continue;
2468 
2469 		/*
2470 		 * Per-cpu mode has contextIDs in the trace and the decoder
2471 		 * calls cs_etm__set_pid_tid_cpu() automatically so no need
2472 		 * to do this here
2473 		 */
2474 		if (etm->per_thread_decoding) {
2475 			tidq = cs_etm__etmq_get_traceid_queue(
2476 				etmq, CS_ETM_PER_THREAD_TRACEID);
2477 
2478 			if (!tidq)
2479 				continue;
2480 
2481 			if ((tid == -1) || (tidq->tid == tid)) {
2482 				cs_etm__set_pid_tid_cpu(etm, tidq);
2483 				cs_etm__run_per_thread_timeless_decoder(etmq);
2484 			}
2485 		} else
2486 			cs_etm__run_per_cpu_timeless_decoder(etmq);
2487 	}
2488 
2489 	return 0;
2490 }
2491 
2492 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm)
2493 {
2494 	int ret = 0;
2495 	unsigned int cs_queue_nr, queue_nr, i;
2496 	u8 trace_chan_id;
2497 	u64 cs_timestamp;
2498 	struct auxtrace_queue *queue;
2499 	struct cs_etm_queue *etmq;
2500 	struct cs_etm_traceid_queue *tidq;
2501 
2502 	/*
2503 	 * Pre-populate the heap with one entry from each queue so that we can
2504 	 * start processing in time order across all queues.
2505 	 */
2506 	for (i = 0; i < etm->queues.nr_queues; i++) {
2507 		etmq = etm->queues.queue_array[i].priv;
2508 		if (!etmq)
2509 			continue;
2510 
2511 		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2512 		if (ret)
2513 			return ret;
2514 	}
2515 
2516 	while (1) {
2517 		if (!etm->heap.heap_cnt)
2518 			goto out;
2519 
2520 		/* Take the entry at the top of the min heap */
2521 		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2522 		queue_nr = TO_QUEUE_NR(cs_queue_nr);
2523 		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2524 		queue = &etm->queues.queue_array[queue_nr];
2525 		etmq = queue->priv;
2526 
2527 		/*
2528 		 * Remove the top entry from the heap since we are about
2529 		 * to process it.
2530 		 */
2531 		auxtrace_heap__pop(&etm->heap);
2532 
2533 		tidq  = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2534 		if (!tidq) {
2535 			/*
2536 			 * No traceID queue has been allocated for this traceID,
2537 			 * which means something somewhere went very wrong.  No
2538 			 * other choice than simply exit.
2539 			 */
2540 			ret = -EINVAL;
2541 			goto out;
2542 		}
2543 
2544 		/*
2545 		 * Packets associated with this timestamp are already in
2546 		 * the etmq's traceID queue, so process them.
2547 		 */
2548 		ret = cs_etm__process_traceid_queue(etmq, tidq);
2549 		if (ret < 0)
2550 			goto out;
2551 
2552 		/*
2553 		 * Packets for this timestamp have been processed, time to
2554 		 * move on to the next timestamp, fetching a new auxtrace_buffer
2555 		 * if need be.
2556 		 */
2557 refetch:
2558 		ret = cs_etm__get_data_block(etmq);
2559 		if (ret < 0)
2560 			goto out;
2561 
2562 		/*
2563 		 * No more auxtrace_buffers to process in this etmq, simply
2564 		 * move on to another entry in the auxtrace_heap.
2565 		 */
2566 		if (!ret)
2567 			continue;
2568 
2569 		ret = cs_etm__decode_data_block(etmq);
2570 		if (ret)
2571 			goto out;
2572 
2573 		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
2574 
2575 		if (!cs_timestamp) {
2576 			/*
2577 			 * Function cs_etm__decode_data_block() returns when
2578 			 * there is no more traces to decode in the current
2579 			 * auxtrace_buffer OR when a timestamp has been
2580 			 * encountered on any of the traceID queues.  Since we
2581 			 * did not get a timestamp, there is no more traces to
2582 			 * process in this auxtrace_buffer.  As such empty and
2583 			 * flush all traceID queues.
2584 			 */
2585 			cs_etm__clear_all_traceid_queues(etmq);
2586 
2587 			/* Fetch another auxtrace_buffer for this etmq */
2588 			goto refetch;
2589 		}
2590 
2591 		/*
2592 		 * Add to the min heap the timestamp for packets that have
2593 		 * just been decoded.  They will be processed and synthesized
2594 		 * during the next call to cs_etm__process_traceid_queue() for
2595 		 * this queue/traceID.
2596 		 */
2597 		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
2598 		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
2599 	}
2600 
2601 out:
2602 	return ret;
2603 }
2604 
2605 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2606 					union perf_event *event)
2607 {
2608 	struct thread *th;
2609 
2610 	if (etm->timeless_decoding)
2611 		return 0;
2612 
2613 	/*
2614 	 * Add the tid/pid to the log so that we can get a match when
2615 	 * we get a contextID from the decoder.
2616 	 */
2617 	th = machine__findnew_thread(etm->machine,
2618 				     event->itrace_start.pid,
2619 				     event->itrace_start.tid);
2620 	if (!th)
2621 		return -ENOMEM;
2622 
2623 	thread__put(th);
2624 
2625 	return 0;
2626 }
2627 
2628 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2629 					   union perf_event *event)
2630 {
2631 	struct thread *th;
2632 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2633 
2634 	/*
2635 	 * Context switch in per-thread mode are irrelevant since perf
2636 	 * will start/stop tracing as the process is scheduled.
2637 	 */
2638 	if (etm->timeless_decoding)
2639 		return 0;
2640 
2641 	/*
2642 	 * SWITCH_IN events carry the next process to be switched out while
2643 	 * SWITCH_OUT events carry the process to be switched in.  As such
2644 	 * we don't care about IN events.
2645 	 */
2646 	if (!out)
2647 		return 0;
2648 
2649 	/*
2650 	 * Add the tid/pid to the log so that we can get a match when
2651 	 * we get a contextID from the decoder.
2652 	 */
2653 	th = machine__findnew_thread(etm->machine,
2654 				     event->context_switch.next_prev_pid,
2655 				     event->context_switch.next_prev_tid);
2656 	if (!th)
2657 		return -ENOMEM;
2658 
2659 	thread__put(th);
2660 
2661 	return 0;
2662 }
2663 
2664 static int cs_etm__process_event(struct perf_session *session,
2665 				 union perf_event *event,
2666 				 struct perf_sample *sample,
2667 				 struct perf_tool *tool)
2668 {
2669 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2670 						   struct cs_etm_auxtrace,
2671 						   auxtrace);
2672 
2673 	if (dump_trace)
2674 		return 0;
2675 
2676 	if (!tool->ordered_events) {
2677 		pr_err("CoreSight ETM Trace requires ordered events\n");
2678 		return -EINVAL;
2679 	}
2680 
2681 	switch (event->header.type) {
2682 	case PERF_RECORD_EXIT:
2683 		/*
2684 		 * Don't need to wait for cs_etm__flush_events() in per-thread mode to
2685 		 * start the decode because we know there will be no more trace from
2686 		 * this thread. All this does is emit samples earlier than waiting for
2687 		 * the flush in other modes, but with timestamps it makes sense to wait
2688 		 * for flush so that events from different threads are interleaved
2689 		 * properly.
2690 		 */
2691 		if (etm->per_thread_decoding && etm->timeless_decoding)
2692 			return cs_etm__process_timeless_queues(etm,
2693 							       event->fork.tid);
2694 		break;
2695 
2696 	case PERF_RECORD_ITRACE_START:
2697 		return cs_etm__process_itrace_start(etm, event);
2698 
2699 	case PERF_RECORD_SWITCH_CPU_WIDE:
2700 		return cs_etm__process_switch_cpu_wide(etm, event);
2701 
2702 	case PERF_RECORD_AUX:
2703 		/*
2704 		 * Record the latest kernel timestamp available in the header
2705 		 * for samples so that synthesised samples occur from this point
2706 		 * onwards.
2707 		 */
2708 		if (sample->time && (sample->time != (u64)-1))
2709 			etm->latest_kernel_timestamp = sample->time;
2710 		break;
2711 
2712 	default:
2713 		break;
2714 	}
2715 
2716 	return 0;
2717 }
2718 
2719 static void dump_queued_data(struct cs_etm_auxtrace *etm,
2720 			     struct perf_record_auxtrace *event)
2721 {
2722 	struct auxtrace_buffer *buf;
2723 	unsigned int i;
2724 	/*
2725 	 * Find all buffers with same reference in the queues and dump them.
2726 	 * This is because the queues can contain multiple entries of the same
2727 	 * buffer that were split on aux records.
2728 	 */
2729 	for (i = 0; i < etm->queues.nr_queues; ++i)
2730 		list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2731 			if (buf->reference == event->reference)
2732 				cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2733 }
2734 
2735 static int cs_etm__process_auxtrace_event(struct perf_session *session,
2736 					  union perf_event *event,
2737 					  struct perf_tool *tool __maybe_unused)
2738 {
2739 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2740 						   struct cs_etm_auxtrace,
2741 						   auxtrace);
2742 	if (!etm->data_queued) {
2743 		struct auxtrace_buffer *buffer;
2744 		off_t  data_offset;
2745 		int fd = perf_data__fd(session->data);
2746 		bool is_pipe = perf_data__is_pipe(session->data);
2747 		int err;
2748 		int idx = event->auxtrace.idx;
2749 
2750 		if (is_pipe)
2751 			data_offset = 0;
2752 		else {
2753 			data_offset = lseek(fd, 0, SEEK_CUR);
2754 			if (data_offset == -1)
2755 				return -errno;
2756 		}
2757 
2758 		err = auxtrace_queues__add_event(&etm->queues, session,
2759 						 event, data_offset, &buffer);
2760 		if (err)
2761 			return err;
2762 
2763 		/*
2764 		 * Knowing if the trace is formatted or not requires a lookup of
2765 		 * the aux record so only works in non-piped mode where data is
2766 		 * queued in cs_etm__queue_aux_records(). Always assume
2767 		 * formatted in piped mode (true).
2768 		 */
2769 		err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2770 					  idx, true);
2771 		if (err)
2772 			return err;
2773 
2774 		if (dump_trace)
2775 			if (auxtrace_buffer__get_data(buffer, fd)) {
2776 				cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2777 				auxtrace_buffer__put_data(buffer);
2778 			}
2779 	} else if (dump_trace)
2780 		dump_queued_data(etm, &event->auxtrace);
2781 
2782 	return 0;
2783 }
2784 
2785 static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
2786 {
2787 	struct evsel *evsel;
2788 	struct evlist *evlist = etm->session->evlist;
2789 
2790 	/* Override timeless mode with user input from --itrace=Z */
2791 	if (etm->synth_opts.timeless_decoding) {
2792 		etm->timeless_decoding = true;
2793 		return 0;
2794 	}
2795 
2796 	/*
2797 	 * Find the cs_etm evsel and look at what its timestamp setting was
2798 	 */
2799 	evlist__for_each_entry(evlist, evsel)
2800 		if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
2801 			etm->timeless_decoding =
2802 				!(evsel->core.attr.config & BIT(ETM_OPT_TS));
2803 			return 0;
2804 		}
2805 
2806 	pr_err("CS ETM: Couldn't find ETM evsel\n");
2807 	return -EINVAL;
2808 }
2809 
2810 /*
2811  * Read a single cpu parameter block from the auxtrace_info priv block.
2812  *
2813  * For version 1 there is a per cpu nr_params entry. If we are handling
2814  * version 1 file, then there may be less, the same, or more params
2815  * indicated by this value than the compile time number we understand.
2816  *
2817  * For a version 0 info block, there are a fixed number, and we need to
2818  * fill out the nr_param value in the metadata we create.
2819  */
2820 static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
2821 				    int out_blk_size, int nr_params_v0)
2822 {
2823 	u64 *metadata = NULL;
2824 	int hdr_version;
2825 	int nr_in_params, nr_out_params, nr_cmn_params;
2826 	int i, k;
2827 
2828 	metadata = zalloc(sizeof(*metadata) * out_blk_size);
2829 	if (!metadata)
2830 		return NULL;
2831 
2832 	/* read block current index & version */
2833 	i = *buff_in_offset;
2834 	hdr_version = buff_in[CS_HEADER_VERSION];
2835 
2836 	if (!hdr_version) {
2837 	/* read version 0 info block into a version 1 metadata block  */
2838 		nr_in_params = nr_params_v0;
2839 		metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
2840 		metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
2841 		metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
2842 		/* remaining block params at offset +1 from source */
2843 		for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
2844 			metadata[k + 1] = buff_in[i + k];
2845 		/* version 0 has 2 common params */
2846 		nr_cmn_params = 2;
2847 	} else {
2848 	/* read version 1 info block - input and output nr_params may differ */
2849 		/* version 1 has 3 common params */
2850 		nr_cmn_params = 3;
2851 		nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
2852 
2853 		/* if input has more params than output - skip excess */
2854 		nr_out_params = nr_in_params + nr_cmn_params;
2855 		if (nr_out_params > out_blk_size)
2856 			nr_out_params = out_blk_size;
2857 
2858 		for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
2859 			metadata[k] = buff_in[i + k];
2860 
2861 		/* record the actual nr params we copied */
2862 		metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
2863 	}
2864 
2865 	/* adjust in offset by number of in params used */
2866 	i += nr_in_params + nr_cmn_params;
2867 	*buff_in_offset = i;
2868 	return metadata;
2869 }
2870 
2871 /**
2872  * Puts a fragment of an auxtrace buffer into the auxtrace queues based
2873  * on the bounds of aux_event, if it matches with the buffer that's at
2874  * file_offset.
2875  *
2876  * Normally, whole auxtrace buffers would be added to the queue. But we
2877  * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
2878  * is reset across each buffer, so splitting the buffers up in advance has
2879  * the same effect.
2880  */
2881 static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
2882 				      struct perf_record_aux *aux_event, struct perf_sample *sample)
2883 {
2884 	int err;
2885 	char buf[PERF_SAMPLE_MAX_SIZE];
2886 	union perf_event *auxtrace_event_union;
2887 	struct perf_record_auxtrace *auxtrace_event;
2888 	union perf_event auxtrace_fragment;
2889 	__u64 aux_offset, aux_size;
2890 	__u32 idx;
2891 	bool formatted;
2892 
2893 	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2894 						   struct cs_etm_auxtrace,
2895 						   auxtrace);
2896 
2897 	/*
2898 	 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
2899 	 * from looping through the auxtrace index.
2900 	 */
2901 	err = perf_session__peek_event(session, file_offset, buf,
2902 				       PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
2903 	if (err)
2904 		return err;
2905 	auxtrace_event = &auxtrace_event_union->auxtrace;
2906 	if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
2907 		return -EINVAL;
2908 
2909 	if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
2910 		auxtrace_event->header.size != sz) {
2911 		return -EINVAL;
2912 	}
2913 
2914 	/*
2915 	 * In per-thread mode, auxtrace CPU is set to -1, but TID will be set instead. See
2916 	 * auxtrace_mmap_params__set_idx(). However, the sample AUX event will contain a
2917 	 * CPU as we set this always for the AUX_OUTPUT_HW_ID event.
2918 	 * So now compare only TIDs if auxtrace CPU is -1, and CPUs if auxtrace CPU is not -1.
2919 	 * Return 'not found' if mismatch.
2920 	 */
2921 	if (auxtrace_event->cpu == (__u32) -1) {
2922 		etm->per_thread_decoding = true;
2923 		if (auxtrace_event->tid != sample->tid)
2924 			return 1;
2925 	} else if (auxtrace_event->cpu != sample->cpu) {
2926 		if (etm->per_thread_decoding) {
2927 			/*
2928 			 * Found a per-cpu buffer after a per-thread one was
2929 			 * already found
2930 			 */
2931 			pr_err("CS ETM: Inconsistent per-thread/per-cpu mode.\n");
2932 			return -EINVAL;
2933 		}
2934 		return 1;
2935 	}
2936 
2937 	if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
2938 		/*
2939 		 * Clamp size in snapshot mode. The buffer size is clamped in
2940 		 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
2941 		 * the buffer size.
2942 		 */
2943 		aux_size = min(aux_event->aux_size, auxtrace_event->size);
2944 
2945 		/*
2946 		 * In this mode, the head also points to the end of the buffer so aux_offset
2947 		 * needs to have the size subtracted so it points to the beginning as in normal mode
2948 		 */
2949 		aux_offset = aux_event->aux_offset - aux_size;
2950 	} else {
2951 		aux_size = aux_event->aux_size;
2952 		aux_offset = aux_event->aux_offset;
2953 	}
2954 
2955 	if (aux_offset >= auxtrace_event->offset &&
2956 	    aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
2957 		/*
2958 		 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
2959 		 * based on the sizes of the aux event, and queue that fragment.
2960 		 */
2961 		auxtrace_fragment.auxtrace = *auxtrace_event;
2962 		auxtrace_fragment.auxtrace.size = aux_size;
2963 		auxtrace_fragment.auxtrace.offset = aux_offset;
2964 		file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
2965 
2966 		pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
2967 			  " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
2968 		err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
2969 						 file_offset, NULL);
2970 		if (err)
2971 			return err;
2972 
2973 		idx = auxtrace_event->idx;
2974 		formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
2975 		return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2976 					   idx, formatted);
2977 	}
2978 
2979 	/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
2980 	return 1;
2981 }
2982 
2983 static int cs_etm__process_aux_hw_id_cb(struct perf_session *session, union perf_event *event,
2984 					u64 offset __maybe_unused, void *data __maybe_unused)
2985 {
2986 	/* look to handle PERF_RECORD_AUX_OUTPUT_HW_ID early to ensure decoders can be set up */
2987 	if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) {
2988 		(*(int *)data)++; /* increment found count */
2989 		return cs_etm__process_aux_output_hw_id(session, event);
2990 	}
2991 	return 0;
2992 }
2993 
2994 static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2995 					u64 offset __maybe_unused, void *data __maybe_unused)
2996 {
2997 	struct perf_sample sample;
2998 	int ret;
2999 	struct auxtrace_index_entry *ent;
3000 	struct auxtrace_index *auxtrace_index;
3001 	struct evsel *evsel;
3002 	size_t i;
3003 
3004 	/* Don't care about any other events, we're only queuing buffers for AUX events */
3005 	if (event->header.type != PERF_RECORD_AUX)
3006 		return 0;
3007 
3008 	if (event->header.size < sizeof(struct perf_record_aux))
3009 		return -EINVAL;
3010 
3011 	/* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
3012 	if (!event->aux.aux_size)
3013 		return 0;
3014 
3015 	/*
3016 	 * Parse the sample, we need the sample_id_all data that comes after the event so that the
3017 	 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
3018 	 */
3019 	evsel = evlist__event2evsel(session->evlist, event);
3020 	if (!evsel)
3021 		return -EINVAL;
3022 	ret = evsel__parse_sample(evsel, event, &sample);
3023 	if (ret)
3024 		return ret;
3025 
3026 	/*
3027 	 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
3028 	 */
3029 	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
3030 		for (i = 0; i < auxtrace_index->nr; i++) {
3031 			ent = &auxtrace_index->entries[i];
3032 			ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
3033 							 ent->sz, &event->aux, &sample);
3034 			/*
3035 			 * Stop search on error or successful values. Continue search on
3036 			 * 1 ('not found')
3037 			 */
3038 			if (ret != 1)
3039 				return ret;
3040 		}
3041 	}
3042 
3043 	/*
3044 	 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
3045 	 * don't exit with an error because it will still be possible to decode other aux records.
3046 	 */
3047 	pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
3048 	       " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
3049 	return 0;
3050 }
3051 
3052 static int cs_etm__queue_aux_records(struct perf_session *session)
3053 {
3054 	struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
3055 								struct auxtrace_index, list);
3056 	if (index && index->nr > 0)
3057 		return perf_session__peek_events(session, session->header.data_offset,
3058 						 session->header.data_size,
3059 						 cs_etm__queue_aux_records_cb, NULL);
3060 
3061 	/*
3062 	 * We would get here if there are no entries in the index (either no auxtrace
3063 	 * buffers or no index at all). Fail silently as there is the possibility of
3064 	 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
3065 	 * false.
3066 	 *
3067 	 * In that scenario, buffers will not be split by AUX records.
3068 	 */
3069 	return 0;
3070 }
3071 
3072 #define HAS_PARAM(j, type, param) (metadata[(j)][CS_ETM_NR_TRC_PARAMS] <= \
3073 				  (CS_##type##_##param - CS_ETM_COMMON_BLK_MAX_V1))
3074 
3075 /*
3076  * Loop through the ETMs and complain if we find at least one where ts_source != 1 (virtual
3077  * timestamps).
3078  */
3079 static bool cs_etm__has_virtual_ts(u64 **metadata, int num_cpu)
3080 {
3081 	int j;
3082 
3083 	for (j = 0; j < num_cpu; j++) {
3084 		switch (metadata[j][CS_ETM_MAGIC]) {
3085 		case __perf_cs_etmv4_magic:
3086 			if (HAS_PARAM(j, ETMV4, TS_SOURCE) || metadata[j][CS_ETMV4_TS_SOURCE] != 1)
3087 				return false;
3088 			break;
3089 		case __perf_cs_ete_magic:
3090 			if (HAS_PARAM(j, ETE, TS_SOURCE) || metadata[j][CS_ETE_TS_SOURCE] != 1)
3091 				return false;
3092 			break;
3093 		default:
3094 			/* Unknown / unsupported magic number. */
3095 			return false;
3096 		}
3097 	}
3098 	return true;
3099 }
3100 
3101 /* map trace ids to correct metadata block, from information in metadata */
3102 static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
3103 {
3104 	u64 cs_etm_magic;
3105 	u8 trace_chan_id;
3106 	int i, err;
3107 
3108 	for (i = 0; i < num_cpu; i++) {
3109 		cs_etm_magic = metadata[i][CS_ETM_MAGIC];
3110 		switch (cs_etm_magic) {
3111 		case __perf_cs_etmv3_magic:
3112 			metadata[i][CS_ETM_ETMTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
3113 			trace_chan_id = (u8)(metadata[i][CS_ETM_ETMTRACEIDR]);
3114 			break;
3115 		case __perf_cs_etmv4_magic:
3116 		case __perf_cs_ete_magic:
3117 			metadata[i][CS_ETMV4_TRCTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
3118 			trace_chan_id = (u8)(metadata[i][CS_ETMV4_TRCTRACEIDR]);
3119 			break;
3120 		default:
3121 			/* unknown magic number */
3122 			return -EINVAL;
3123 		}
3124 		err = cs_etm__map_trace_id(trace_chan_id, metadata[i]);
3125 		if (err)
3126 			return err;
3127 	}
3128 	return 0;
3129 }
3130 
3131 /*
3132  * If we found AUX_HW_ID packets, then set any metadata marked as unused to the
3133  * unused value to reduce the number of unneeded decoders created.
3134  */
3135 static int cs_etm__clear_unused_trace_ids_metadata(int num_cpu, u64 **metadata)
3136 {
3137 	u64 cs_etm_magic;
3138 	int i;
3139 
3140 	for (i = 0; i < num_cpu; i++) {
3141 		cs_etm_magic = metadata[i][CS_ETM_MAGIC];
3142 		switch (cs_etm_magic) {
3143 		case __perf_cs_etmv3_magic:
3144 			if (metadata[i][CS_ETM_ETMTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
3145 				metadata[i][CS_ETM_ETMTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
3146 			break;
3147 		case __perf_cs_etmv4_magic:
3148 		case __perf_cs_ete_magic:
3149 			if (metadata[i][CS_ETMV4_TRCTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
3150 				metadata[i][CS_ETMV4_TRCTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
3151 			break;
3152 		default:
3153 			/* unknown magic number */
3154 			return -EINVAL;
3155 		}
3156 	}
3157 	return 0;
3158 }
3159 
3160 int cs_etm__process_auxtrace_info_full(union perf_event *event,
3161 				       struct perf_session *session)
3162 {
3163 	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3164 	struct cs_etm_auxtrace *etm = NULL;
3165 	struct perf_record_time_conv *tc = &session->time_conv;
3166 	int event_header_size = sizeof(struct perf_event_header);
3167 	int total_size = auxtrace_info->header.size;
3168 	int priv_size = 0;
3169 	int num_cpu;
3170 	int err = 0;
3171 	int aux_hw_id_found;
3172 	int i, j;
3173 	u64 *ptr = NULL;
3174 	u64 **metadata = NULL;
3175 
3176 	/*
3177 	 * Create an RB tree for traceID-metadata tuple.  Since the conversion
3178 	 * has to be made for each packet that gets decoded, optimizing access
3179 	 * in anything other than a sequential array is worth doing.
3180 	 */
3181 	traceid_list = intlist__new(NULL);
3182 	if (!traceid_list)
3183 		return -ENOMEM;
3184 
3185 	/* First the global part */
3186 	ptr = (u64 *) auxtrace_info->priv;
3187 	num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
3188 	metadata = zalloc(sizeof(*metadata) * num_cpu);
3189 	if (!metadata) {
3190 		err = -ENOMEM;
3191 		goto err_free_traceid_list;
3192 	}
3193 
3194 	/* Start parsing after the common part of the header */
3195 	i = CS_HEADER_VERSION_MAX;
3196 
3197 	/*
3198 	 * The metadata is stored in the auxtrace_info section and encodes
3199 	 * the configuration of the ARM embedded trace macrocell which is
3200 	 * required by the trace decoder to properly decode the trace due
3201 	 * to its highly compressed nature.
3202 	 */
3203 	for (j = 0; j < num_cpu; j++) {
3204 		if (ptr[i] == __perf_cs_etmv3_magic) {
3205 			metadata[j] =
3206 				cs_etm__create_meta_blk(ptr, &i,
3207 							CS_ETM_PRIV_MAX,
3208 							CS_ETM_NR_TRC_PARAMS_V0);
3209 		} else if (ptr[i] == __perf_cs_etmv4_magic) {
3210 			metadata[j] =
3211 				cs_etm__create_meta_blk(ptr, &i,
3212 							CS_ETMV4_PRIV_MAX,
3213 							CS_ETMV4_NR_TRC_PARAMS_V0);
3214 		} else if (ptr[i] == __perf_cs_ete_magic) {
3215 			metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
3216 		} else {
3217 			ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
3218 				  ptr[i]);
3219 			err = -EINVAL;
3220 			goto err_free_metadata;
3221 		}
3222 
3223 		if (!metadata[j]) {
3224 			err = -ENOMEM;
3225 			goto err_free_metadata;
3226 		}
3227 	}
3228 
3229 	/*
3230 	 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
3231 	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
3232 	 * global metadata, and each cpu's metadata respectively.
3233 	 * The following tests if the correct number of double words was
3234 	 * present in the auxtrace info section.
3235 	 */
3236 	priv_size = total_size - event_header_size - INFO_HEADER_SIZE;
3237 	if (i * 8 != priv_size) {
3238 		err = -EINVAL;
3239 		goto err_free_metadata;
3240 	}
3241 
3242 	etm = zalloc(sizeof(*etm));
3243 
3244 	if (!etm) {
3245 		err = -ENOMEM;
3246 		goto err_free_metadata;
3247 	}
3248 
3249 	err = auxtrace_queues__init(&etm->queues);
3250 	if (err)
3251 		goto err_free_etm;
3252 
3253 	if (session->itrace_synth_opts->set) {
3254 		etm->synth_opts = *session->itrace_synth_opts;
3255 	} else {
3256 		itrace_synth_opts__set_default(&etm->synth_opts,
3257 				session->itrace_synth_opts->default_no_sample);
3258 		etm->synth_opts.callchain = false;
3259 	}
3260 
3261 	etm->session = session;
3262 	etm->machine = &session->machines.host;
3263 
3264 	etm->num_cpu = num_cpu;
3265 	etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
3266 	etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
3267 	etm->metadata = metadata;
3268 	etm->auxtrace_type = auxtrace_info->type;
3269 
3270 	/* Use virtual timestamps if all ETMs report ts_source = 1 */
3271 	etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
3272 
3273 	if (!etm->has_virtual_ts)
3274 		ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
3275 			    "The time field of the samples will not be set accurately.\n\n");
3276 
3277 	etm->auxtrace.process_event = cs_etm__process_event;
3278 	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3279 	etm->auxtrace.flush_events = cs_etm__flush_events;
3280 	etm->auxtrace.free_events = cs_etm__free_events;
3281 	etm->auxtrace.free = cs_etm__free;
3282 	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
3283 	session->auxtrace = &etm->auxtrace;
3284 
3285 	err = cs_etm__setup_timeless_decoding(etm);
3286 	if (err)
3287 		return err;
3288 
3289 	etm->unknown_thread = thread__new(999999999, 999999999);
3290 	if (!etm->unknown_thread) {
3291 		err = -ENOMEM;
3292 		goto err_free_queues;
3293 	}
3294 
3295 	/*
3296 	 * Initialize list node so that at thread__zput() we can avoid
3297 	 * segmentation fault at list_del_init().
3298 	 */
3299 	INIT_LIST_HEAD(&etm->unknown_thread->node);
3300 
3301 	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
3302 	if (err)
3303 		goto err_delete_thread;
3304 
3305 	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
3306 		err = -ENOMEM;
3307 		goto err_delete_thread;
3308 	}
3309 
3310 	etm->tc.time_shift = tc->time_shift;
3311 	etm->tc.time_mult = tc->time_mult;
3312 	etm->tc.time_zero = tc->time_zero;
3313 	if (event_contains(*tc, time_cycles)) {
3314 		etm->tc.time_cycles = tc->time_cycles;
3315 		etm->tc.time_mask = tc->time_mask;
3316 		etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
3317 		etm->tc.cap_user_time_short = tc->cap_user_time_short;
3318 	}
3319 	err = cs_etm__synth_events(etm, session);
3320 	if (err)
3321 		goto err_delete_thread;
3322 
3323 	/*
3324 	 * Map Trace ID values to CPU metadata.
3325 	 *
3326 	 * Trace metadata will always contain Trace ID values from the legacy algorithm. If the
3327 	 * files has been recorded by a "new" perf updated to handle AUX_HW_ID then the metadata
3328 	 * ID value will also have the CORESIGHT_TRACE_ID_UNUSED_FLAG set.
3329 	 *
3330 	 * The updated kernel drivers that use AUX_HW_ID to sent Trace IDs will attempt to use
3331 	 * the same IDs as the old algorithm as far as is possible, unless there are clashes
3332 	 * in which case a different value will be used. This means an older perf may still
3333 	 * be able to record and read files generate on a newer system.
3334 	 *
3335 	 * For a perf able to interpret AUX_HW_ID packets we first check for the presence of
3336 	 * those packets. If they are there then the values will be mapped and plugged into
3337 	 * the metadata. We then set any remaining metadata values with the used flag to a
3338 	 * value CORESIGHT_TRACE_ID_UNUSED_VAL - which indicates no decoder is required.
3339 	 *
3340 	 * If no AUX_HW_ID packets are present - which means a file recorded on an old kernel
3341 	 * then we map Trace ID values to CPU directly from the metadata - clearing any unused
3342 	 * flags if present.
3343 	 */
3344 
3345 	/* first scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
3346 	aux_hw_id_found = 0;
3347 	err = perf_session__peek_events(session, session->header.data_offset,
3348 					session->header.data_size,
3349 					cs_etm__process_aux_hw_id_cb, &aux_hw_id_found);
3350 	if (err)
3351 		goto err_delete_thread;
3352 
3353 	/* if HW ID found then clear any unused metadata ID values */
3354 	if (aux_hw_id_found)
3355 		err = cs_etm__clear_unused_trace_ids_metadata(num_cpu, metadata);
3356 	/* otherwise, this is a file with metadata values only, map from metadata */
3357 	else
3358 		err = cs_etm__map_trace_ids_metadata(num_cpu, metadata);
3359 
3360 	if (err)
3361 		goto err_delete_thread;
3362 
3363 	err = cs_etm__queue_aux_records(session);
3364 	if (err)
3365 		goto err_delete_thread;
3366 
3367 	etm->data_queued = etm->queues.populated;
3368 	return 0;
3369 
3370 err_delete_thread:
3371 	thread__zput(etm->unknown_thread);
3372 err_free_queues:
3373 	auxtrace_queues__free(&etm->queues);
3374 	session->auxtrace = NULL;
3375 err_free_etm:
3376 	zfree(&etm);
3377 err_free_metadata:
3378 	/* No need to check @metadata[j], free(NULL) is supported */
3379 	for (j = 0; j < num_cpu; j++)
3380 		zfree(&metadata[j]);
3381 	zfree(&metadata);
3382 err_free_traceid_list:
3383 	intlist__delete(traceid_list);
3384 	return err;
3385 }
3386