// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <asm/bug.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>
#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
#include <opencsd/ocsd_if_types.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
#include "intlist.h"

/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif

struct cs_etm_decoder {
	void *data;
	void (*packet_printer)(const char *msg);
	dcd_tree_handle_t dcd_tree;
	cs_etm_mem_cb_type mem_access;
	ocsd_datapath_resp_t prev_return;
};

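/*
 * Memory access trampoline: OpenCSD calls this with its own callback
 * signature, so recover the decoder from the opaque context and forward the
 * request to the mem_access callback registered by the perf side.
 */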
static u32
cs_etm_decoder__mem_access(const void *context,
			   const ocsd_vaddr_t address,
			   const ocsd_mem_space_acc_t mem_space __maybe_unused,
			   const u8 trace_chan_id,
			   const u32 req_size,
			   u8 *buffer)
{
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;

	return decoder->mem_access(decoder->data, trace_chan_id,
				   address, req_size, buffer);
}

int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
				      u64 start, u64 end,
				      cs_etm_mem_cb_type cb_func)
{
	decoder->mem_access = cb_func;

	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
					       OCSD_MEM_SPACE_ANY,
					       cs_etm_decoder__mem_access,
					       decoder))
		return -1;

	return 0;
}

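/*
 * Reset the decode tree: send an OCSD_OP_RESET operation down the datapath
 * so any partial decode state is dropped, and clear the cached response so
 * the next data block starts from OCSD_RESP_CONT.
 */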
int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
	ocsd_datapath_resp_t dp_ret;

	decoder->prev_return = OCSD_RESP_CONT;

	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
				      0, 0, NULL, NULL);
	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
		return -1;

	return 0;
}

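/*
 * Pop the oldest packet from @packet_queue into @packet.  Returns 1 if a
 * packet was copied out, 0 if the queue is empty and -EINVAL on invalid
 * parameters.
 */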
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
			       struct cs_etm_packet *packet)
{
	if (!packet_queue || !packet)
		return -EINVAL;

	/* Nothing to do, might as well just return */
	if (packet_queue->packet_count == 0)
		return 0;
	/*
	 * The queueing process in function cs_etm_decoder__buffer_packet()
	 * increments the tail *before* using it.  This is somewhat
	 * counter-intuitive but it has the advantage of centralizing tail
	 * management at a single location.  Because of that we need to follow
	 * the same heuristic with the head, i.e. we increment it before using
	 * its value.  Otherwise the first element of the packet queue is not
	 * used.
	 */
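	/*
	 * CS_ETM_PACKET_MAX_BUFFER is a power of two, so masking with
	 * (CS_ETM_PACKET_MAX_BUFFER - 1) is equivalent to a modulo and wraps
	 * the index around the ring buffer.
	 */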
	packet_queue->head = (packet_queue->head + 1) &
			     (CS_ETM_PACKET_MAX_BUFFER - 1);

	*packet = packet_queue->packet_buffer[packet_queue->head];

	packet_queue->packet_count--;

	return 1;
}

static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
					    ocsd_etmv3_cfg *config)
{
	config->reg_idr = params->etmv3.reg_idr;
	config->reg_ctrl = params->etmv3.reg_ctrl;
	config->reg_ccer = params->etmv3.reg_ccer;
	config->reg_trc_id = params->etmv3.reg_trc_id;
	config->arch_ver = ARCH_V7;
	config->core_prof = profile_CortexA;

	return 0;
}

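/*
 * Translate the ETMv4 register values captured at record time into the
 * ocsd_etmv4_cfg structure expected by OpenCSD.  Only IDR0, IDR1, IDR2 and
 * IDR8 are provided by the trace parameters; the remaining IDR fields are
 * zeroed.
 */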
static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
					     ocsd_etmv4_cfg *config)
{
	config->reg_configr = params->etmv4.reg_configr;
	config->reg_traceidr = params->etmv4.reg_traceidr;
	config->reg_idr0 = params->etmv4.reg_idr0;
	config->reg_idr1 = params->etmv4.reg_idr1;
	config->reg_idr2 = params->etmv4.reg_idr2;
	config->reg_idr8 = params->etmv4.reg_idr8;
	config->reg_idr9 = 0;
	config->reg_idr10 = 0;
	config->reg_idr11 = 0;
	config->reg_idr12 = 0;
	config->reg_idr13 = 0;
	config->arch_ver = ARCH_V8;
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__print_str_cb(const void *p_context,
					 const char *msg,
					 const int str_len)
{
	if (p_context && str_len)
		((struct cs_etm_decoder *)p_context)->packet_printer(msg);
}

static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
					 struct cs_etm_decoder *decoder)
{
	int ret = 0;

	if (d_params->packet_printer == NULL)
		return -1;

	decoder->packet_printer = d_params->packet_printer;

	/*
	 * Set up a library default logger to process any printers
	 * (packet/raw frame) we add later.
	 */
	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
	if (ret != 0)
		return -1;

	/* no stdout / err / file output */
	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
	if (ret != 0)
		return -1;

	/*
	 * Set the string callback for the default logger; it passes strings
	 * to the perf print logger.
	 */
	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
					      (void *)decoder,
					      cs_etm_decoder__print_str_cb);
	if (ret != 0)
		ret = -1;

	return 0;
}

#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_decoder *decoder)
{
	/* Only log these during a --dump operation */
	if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		/*
		 * Set up a library default logger to process the raw frame
		 * printer we add later.
		 */
		ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);

		/* no stdout / err / file output */
		ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);

		/*
		 * Set the string callback for the default logger; it passes
		 * strings to the perf print logger.
		 */
		ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
						(void *)decoder,
						cs_etm_decoder__print_str_cb);

		/* use the built-in library printer for the raw frames */
		ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
					      CS_RAW_DEBUG_FLAGS);
	}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
		struct cs_etm_decoder_params *d_params __maybe_unused,
		struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif

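/*
 * Create a packet processing stage only (no full decode) for the given
 * protocol and attach the library's packet printer to it.  This is the
 * path used for the CS_ETM_OPERATION_PRINT (--dump) case.
 */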
static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
						 const char *decoder_name,
						 void *trace_config)
{
	u8 csid;

	if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
				   OCSD_CREATE_FLG_PACKET_PROC,
				   trace_config, &csid))
		return -1;

	if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
		return -1;

	return 0;
}

static int
cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
					  struct cs_etm_decoder *decoder)
{
	const char *decoder_name;
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	void *trace_config;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	default:
		return -1;
	}

	return cs_etm_decoder__create_packet_printer(decoder,
						     decoder_name,
						     trace_config);
}

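/*
 * Update the per-queue software timestamp once a hard timestamp has been
 * seen: promote the previously recorded next_cs_timestamp and estimate the
 * following one by adding the number of instructions executed since, i.e.
 * one timestamp unit per instruction.  Returning OCSD_RESP_WAIT halts the
 * decoder until the front end has consumed the queue.
 */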
static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
				  struct cs_etm_packet_queue *packet_queue,
				  const uint8_t trace_chan_id)
{
	/* No timestamp packet has been received, nothing to do */
	if (!packet_queue->cs_timestamp)
		return OCSD_RESP_CONT;

	packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;

	/* Estimate the timestamp for the next range packet */
	packet_queue->next_cs_timestamp += packet_queue->instr_count;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	return OCSD_RESP_WAIT;
}

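/*
 * Handle a TIMESTAMP element from the trace.  After the first timestamp
 * following a reset or discontinuity, the start time of the preceding
 * ranges is estimated by subtracting the instructions counted so far;
 * subsequent timestamps are simply recorded for do_soft_timestamp().
 */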
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id,
				  const ocsd_trc_index_t indx)
{
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front
	 * end, so keep the decoder running (OCSD_RESP_CONT) rather than
	 * stopping it.
	 */
	if (packet_queue->cs_timestamp) {
		packet_queue->next_cs_timestamp = elem->timestamp;
		return OCSD_RESP_CONT;
	}

	if (!elem->timestamp) {
		/*
		 * Zero timestamps can be seen due to misconfiguration or hardware bugs.
		 * Warn once, and don't try to subtract instr_count as it would result in an
		 * underflow.
		 */
		packet_queue->cs_timestamp = 0;
		WARN_ONCE(true, "Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
				". Decoding may be improved with --itrace=Z...\n", indx);
	} else if (packet_queue->instr_count > elem->timestamp) {
		/*
		 * Sanity check that elem->timestamp - packet_queue->instr_count would not
		 * result in an underflow.  Warn and clamp at 0 if it would.
		 */
		packet_queue->cs_timestamp = 0;
		pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
	} else {
		/*
		 * This is the first timestamp we've seen since the beginning of traces
		 * or a discontinuity.  Since timestamp packets are generated *after*
		 * range packets have been generated, we need to estimate the time at
		 * which instructions started by subtracting the number of instructions
		 * executed from the timestamp.
		 */
		packet_queue->cs_timestamp = elem->timestamp - packet_queue->instr_count;
	}
	packet_queue->next_cs_timestamp = elem->timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are being told to proceed */
	return OCSD_RESP_WAIT;
}

static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
	packet_queue->cs_timestamp = 0;
	packet_queue->next_cs_timestamp = 0;
	packet_queue->instr_count = 0;
}

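/*
 * Reserve the next slot in the packet ring buffer and initialise it with
 * default values for the given sample type.  Returns OCSD_RESP_WAIT when
 * this fills the queue so that the decoder pauses until the front end has
 * drained it, and OCSD_RESP_FATAL_SYS_ERR if the queue is already full or
 * the CPU for this trace ID cannot be found.
 */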
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
			      const u8 trace_chan_id,
			      enum cs_etm_sample_type sample_type)
{
	u32 et = 0;
	int cpu;

	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_FATAL_SYS_ERR;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return OCSD_RESP_FATAL_SYS_ERR;

	et = packet_queue->tail;
	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
	packet_queue->tail = et;
	packet_queue->packet_count++;

	packet_queue->packet_buffer[et].sample_type = sample_type;
	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
	packet_queue->packet_buffer[et].cpu = cpu;
	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].instr_count = 0;
	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
	packet_queue->packet_buffer[et].last_instr_size = 0;
	packet_queue->packet_buffer[et].last_instr_type = 0;
	packet_queue->packet_buffer[et].last_instr_subtype = 0;
	packet_queue->packet_buffer[et].last_instr_cond = 0;
	packet_queue->packet_buffer[et].flags = 0;
	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;

	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_WAIT;

	return OCSD_RESP_CONT;
}

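/*
 * Convert an INSTR_RANGE element into a CS_ETM_RANGE packet: record the
 * ISA, address range, instruction count and details of the last executed
 * instruction, then update the software timestamp bookkeeping unless the
 * trace is timeless (per-thread mode).
 */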
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
			     struct cs_etm_packet_queue *packet_queue,
			     const ocsd_generic_trace_elem *elem,
			     const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
					    CS_ETM_RANGE);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &packet_queue->packet_buffer[packet_queue->tail];

	switch (elem->isa) {
	case ocsd_isa_aarch64:
		packet->isa = CS_ETM_ISA_A64;
		break;
	case ocsd_isa_arm:
		packet->isa = CS_ETM_ISA_A32;
		break;
	case ocsd_isa_thumb2:
		packet->isa = CS_ETM_ISA_T32;
		break;
	case ocsd_isa_tee:
	case ocsd_isa_jazelle:
	case ocsd_isa_custom:
	case ocsd_isa_unknown:
	default:
		packet->isa = CS_ETM_ISA_UNKNOWN;
	}

	packet->start_addr = elem->st_addr;
	packet->end_addr = elem->en_addr;
	packet->instr_count = elem->num_instr_range;
	packet->last_instr_type = elem->last_i_type;
	packet->last_instr_subtype = elem->last_i_subtype;
	packet->last_instr_cond = elem->last_instr_cond;

	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
		packet->last_instr_taken_branch = elem->last_instr_exec;
	else
		packet->last_instr_taken_branch = false;

	packet->last_instr_size = elem->last_instr_sz;

	/* per-thread scenario, no need to generate a timestamp */
	if (cs_etm__etmq_is_timeless(etmq))
		goto out;

	/*
	 * The packet queue is full and we haven't seen a timestamp (had we
	 * seen one the packet queue wouldn't be full).  Let the front end
	 * deal with it.
	 */
	if (ret == OCSD_RESP_WAIT)
		goto out;

	packet_queue->instr_count += elem->num_instr_range;
	/* Tell the front end we have a new timestamp to process */
	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
						trace_chan_id);
out:
	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	/*
	 * Something happened and who knows when we'll get new traces so
	 * reset time statistics.
	 */
	cs_etm_decoder__reset_timestamp(queue);
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_DISCONTINUITY);
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
				 const ocsd_generic_trace_elem *elem,
				 const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					    CS_ETM_EXCEPTION);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &queue->packet_buffer[queue->tail];
	packet->exception_number = elem->exception_number;

	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}

static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
			struct cs_etm_packet_queue *packet_queue,
			const ocsd_generic_trace_elem *elem,
			const uint8_t trace_chan_id)
{
	pid_t tid = -1;
	static u64 pid_fmt;
	int ret;

	/*
	 * As all the ETMs run at the same exception level, the system should
	 * have the same PID format across CPUs.  So cache the PID format and
	 * reuse it for subsequent decoding.
	 */
	if (!pid_fmt) {
		ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
		if (ret)
			return OCSD_RESP_FATAL_SYS_ERR;
	}

	/*
	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
	 * as the VMID; bit ETM_OPT_CTXTID2 is set in this case.
	 */
	switch (pid_fmt) {
	case BIT(ETM_OPT_CTXTID):
		if (elem->context.ctxt_id_valid)
			tid = elem->context.context_id;
		break;
	case BIT(ETM_OPT_CTXTID2):
		if (elem->context.vmid_valid)
			tid = elem->context.vmid;
		break;
	default:
		break;
	}

	if (tid == -1)
		return OCSD_RESP_CONT;

	if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * A timestamp is generated after a PE_CONTEXT element, so make sure
	 * to rely on the one that is coming next.
	 */
	cs_etm_decoder__reset_timestamp(packet_queue);

	return OCSD_RESP_CONT;
}

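/*
 * Main OpenCSD output callback for fully decoded generic trace elements.
 * Dispatch each element type to the helper that turns it into cs_etm
 * packets for the perf front end; element types that perf does not use are
 * silently ignored.
 */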
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
				const void *context,
				const ocsd_trc_index_t indx,
				const u8 trace_chan_id,
				const ocsd_generic_trace_elem *elem)
{
	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
	struct cs_etm_queue *etmq = decoder->data;
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	switch (elem->elem_type) {
	case OCSD_GEN_TRC_ELEM_UNKNOWN:
		break;
	case OCSD_GEN_TRC_ELEM_EO_TRACE:
	case OCSD_GEN_TRC_ELEM_NO_SYNC:
	case OCSD_GEN_TRC_ELEM_TRACE_ON:
		resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
						    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION:
		resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
							trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
		resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
							 trace_chan_id,
							 indx);
		break;
	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
					       elem, trace_chan_id);
		break;
	/* Unused packet types */
	case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
	case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
	case OCSD_GEN_TRC_ELEM_EVENT:
	case OCSD_GEN_TRC_ELEM_SWTRACE:
	case OCSD_GEN_TRC_ELEM_CUSTOM:
	case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
	case OCSD_GEN_TRC_ELEM_MEMTRANS:
	default:
		break;
	}

	return resp;
}

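/*
 * Create a full decoder (packet processing plus decode) for one trace
 * source and register cs_etm_decoder__gen_trace_elem_printer() as the
 * generic element output callback.
 */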
static int cs_etm_decoder__create_etm_packet_decoder(
					struct cs_etm_trace_params *t_params,
					struct cs_etm_decoder *decoder)
{
	const char *decoder_name;
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	void *trace_config;
	u8 csid;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	default:
		return -1;
	}

	if (ocsd_dt_create_decoder(decoder->dcd_tree,
				   decoder_name,
				   OCSD_CREATE_FLG_FULL_DECODER,
				   trace_config, &csid))
		return -1;

	if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
				       cs_etm_decoder__gen_trace_elem_printer,
				       decoder))
		return -1;

	return 0;
}

static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	if (d_params->operation == CS_ETM_OPERATION_PRINT)
		return cs_etm_decoder__create_etm_packet_printer(t_params,
								 decoder);
	else if (d_params->operation == CS_ETM_OPERATION_DECODE)
		return cs_etm_decoder__create_etm_packet_decoder(t_params,
								 decoder);

	return -1;
}

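/*
 * Allocate a decoder and build the OpenCSD decode tree for the session:
 * configure the frame deformatter according to the decoder parameters, set
 * up error and raw frame logging, then create one protocol decoder per CPU
 * from the saved trace parameters.
 */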
struct cs_etm_decoder *
cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if (!t_params || !d_params)
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					 OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf, set up to
	 * handle this. Barriers consist of a FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	/* init raw frame logging if required */
	cs_etm_decoder__init_raw_frame_logging(d_params, decoder);

	for (i = 0; i < num_cpu; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}

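/*
 * Feed a block of trace data to the decode tree.  If the previous call
 * ended with a WAIT response, flush first; otherwise push data with
 * OCSD_OP_DATA.  Processing stops early when the packet queue fills up
 * again, and the number of bytes actually accepted is reported through
 * @consumed so the caller can resume from the right offset.
 */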
int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}

void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}