// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <opencsd/c_api/opencsd_c_api.h>
#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
#include <opencsd/ocsd_if_types.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "intlist.h"

/* use raw logging */
#ifdef CS_DEBUG_RAW
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif

struct cs_etm_decoder {
	void *data;
	void (*packet_printer)(const char *msg);
	dcd_tree_handle_t dcd_tree;
	cs_etm_mem_cb_type mem_access;
	ocsd_datapath_resp_t prev_return;
};

static u32
cs_etm_decoder__mem_access(const void *context,
			   const ocsd_vaddr_t address,
			   const ocsd_mem_space_acc_t mem_space __maybe_unused,
			   const u8 trace_chan_id,
			   const u32 req_size,
			   u8 *buffer)
{
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;

	return decoder->mem_access(decoder->data, trace_chan_id,
				   address, req_size, buffer);
}

int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
				      u64 start, u64 end,
				      cs_etm_mem_cb_type cb_func)
{
	decoder->mem_access = cb_func;

	if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
					       OCSD_MEM_SPACE_ANY,
					       cs_etm_decoder__mem_access,
					       decoder))
		return -1;

	return 0;
}

int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
{
	ocsd_datapath_resp_t dp_ret;

	decoder->prev_return = OCSD_RESP_CONT;

	dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
				      0, 0, NULL, NULL);
	if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
		return -1;

	return 0;
}
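
/*
 * Copy the oldest queued packet to @packet and advance the queue head.
 *
 * Returns -EINVAL on invalid parameters, 0 if the queue is empty and 1 if a
 * packet was returned.
 */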
int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
			       struct cs_etm_packet *packet)
{
	if (!packet_queue || !packet)
		return -EINVAL;

	/* Nothing to do, might as well just return */
	if (packet_queue->packet_count == 0)
		return 0;
	/*
	 * The queueing process in function cs_etm_decoder__buffer_packet()
	 * increments the tail *before* using it.  This is somewhat
	 * counter-intuitive but it has the advantage of centralizing tail
	 * management at a single location.  Because of that we need to follow
	 * the same heuristic with the head, i.e. we increment it before using
	 * its value.  Otherwise the first element of the packet queue is not
	 * used.
	 */
	packet_queue->head = (packet_queue->head + 1) &
			     (CS_ETM_PACKET_MAX_BUFFER - 1);

	*packet = packet_queue->packet_buffer[packet_queue->head];

	packet_queue->packet_count--;

	return 1;
}

static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
					    ocsd_etmv3_cfg *config)
{
	config->reg_idr = params->etmv3.reg_idr;
	config->reg_ctrl = params->etmv3.reg_ctrl;
	config->reg_ccer = params->etmv3.reg_ccer;
	config->reg_trc_id = params->etmv3.reg_trc_id;
	config->arch_ver = ARCH_V7;
	config->core_prof = profile_CortexA;

	return 0;
}

static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
					     ocsd_etmv4_cfg *config)
{
	config->reg_configr = params->etmv4.reg_configr;
	config->reg_traceidr = params->etmv4.reg_traceidr;
	config->reg_idr0 = params->etmv4.reg_idr0;
	config->reg_idr1 = params->etmv4.reg_idr1;
	config->reg_idr2 = params->etmv4.reg_idr2;
	config->reg_idr8 = params->etmv4.reg_idr8;
	config->reg_idr9 = 0;
	config->reg_idr10 = 0;
	config->reg_idr11 = 0;
	config->reg_idr12 = 0;
	config->reg_idr13 = 0;
	config->arch_ver = ARCH_V8;
	config->core_prof = profile_CortexA;
}

static void cs_etm_decoder__print_str_cb(const void *p_context,
					 const char *msg,
					 const int str_len)
{
	if (p_context && str_len)
		((struct cs_etm_decoder *)p_context)->packet_printer(msg);
}

static int
cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
					 struct cs_etm_decoder *decoder)
{
	int ret = 0;

	if (d_params->packet_printer == NULL)
		return -1;

	decoder->packet_printer = d_params->packet_printer;

	/*
	 * Set up a library default logger to process any printers
	 * (packet/raw frame) we add later.
	 */
	ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
	if (ret != 0)
		return -1;

	/* no stdout / err / file output */
	ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
	if (ret != 0)
		return -1;

	/*
	 * Set the string CB for the default logger; it passes strings to
	 * the perf print logger.
	 */
	ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
					      (void *)decoder,
					      cs_etm_decoder__print_str_cb);
	if (ret != 0)
		return -1;

	return 0;
}
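
/*
 * Raw frame logging is only compiled in when the tool is built with
 * CS_DEBUG_RAW defined (see CS_LOG_RAW_FRAMES above); otherwise the empty
 * stub further down is used.
 */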
#ifdef CS_LOG_RAW_FRAMES
static void
cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_decoder *decoder)
{
	/* Only log these during a --dump operation */
	if (d_params->operation == CS_ETM_OPERATION_PRINT) {
		/* set up a library default logger to process the
		 * raw frame printer we add later
		 */
		ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);

		/* no stdout / err / file output */
		ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);

		/* set the string CB for the default logger,
		 * passes strings to perf print logger.
		 */
		ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
						(void *)decoder,
						cs_etm_decoder__print_str_cb);

		/* use the built in library printer for the raw frames */
		ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
					      CS_RAW_DEBUG_FLAGS);
	}
}
#else
static void
cs_etm_decoder__init_raw_frame_logging(
		struct cs_etm_decoder_params *d_params __maybe_unused,
		struct cs_etm_decoder *decoder __maybe_unused)
{
}
#endif

static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
						 const char *decoder_name,
						 void *trace_config)
{
	u8 csid;

	if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
				   OCSD_CREATE_FLG_PACKET_PROC,
				   trace_config, &csid))
		return -1;

	if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
		return -1;

	return 0;
}

static int
cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
					  struct cs_etm_decoder *decoder)
{
	const char *decoder_name;
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	void *trace_config;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	default:
		return -1;
	}

	return cs_etm_decoder__create_packet_printer(decoder,
						     decoder_name,
						     trace_config);
}

static ocsd_datapath_resp_t
cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
				  struct cs_etm_packet_queue *packet_queue,
				  const uint8_t trace_chan_id)
{
	/* No timestamp packet has been received, nothing to do */
	if (!packet_queue->timestamp)
		return OCSD_RESP_CONT;

	packet_queue->timestamp = packet_queue->next_timestamp;

	/* Estimate the timestamp for the next range packet */
	packet_queue->next_timestamp += packet_queue->instr_count;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	return OCSD_RESP_WAIT;
}
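
/*
 * A "hard" timestamp comes from an OCSD_GEN_TRC_ELEM_TIMESTAMP element in the
 * trace.  Between two such elements, cs_etm_decoder__do_soft_timestamp() above
 * estimates the passage of time by adding the number of instructions executed
 * since the previous estimate.
 */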
static ocsd_datapath_resp_t
cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
				  const ocsd_generic_trace_elem *elem,
				  const uint8_t trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * We've seen a timestamp packet before - simply record the new value.
	 * Function do_soft_timestamp() will report the value to the front
	 * end, hence asking the decoder to keep decoding rather than
	 * stopping.
	 */
	if (packet_queue->timestamp) {
		packet_queue->next_timestamp = elem->timestamp;
		return OCSD_RESP_CONT;
	}

	/*
	 * This is the first timestamp we've seen since the beginning of
	 * traces or a discontinuity.  Since timestamp packets are generated
	 * *after* range packets have been generated, we need to estimate the
	 * time at which instructions started by subtracting the number of
	 * instructions executed from the timestamp.
	 */
	packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
	packet_queue->next_timestamp = elem->timestamp;
	packet_queue->instr_count = 0;

	/* Tell the front end which traceid_queue needs attention */
	cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);

	/* Halt processing until we are told to proceed */
	return OCSD_RESP_WAIT;
}

static void
cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
{
	packet_queue->timestamp = 0;
	packet_queue->next_timestamp = 0;
	packet_queue->instr_count = 0;
}
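
/*
 * Queue a new packet at the tail of the per-traceID ring buffer.  Indices
 * wrap using a mask, so CS_ETM_PACKET_MAX_BUFFER must be a power of two.
 * Once the buffer is about to fill up, OCSD_RESP_WAIT is returned so that the
 * front end drains the queue before decoding resumes.
 */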
static ocsd_datapath_resp_t
cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
			      const u8 trace_chan_id,
			      enum cs_etm_sample_type sample_type)
{
	u32 et = 0;
	int cpu;

	if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_FATAL_SYS_ERR;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return OCSD_RESP_FATAL_SYS_ERR;

	et = packet_queue->tail;
	et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
	packet_queue->tail = et;
	packet_queue->packet_count++;

	packet_queue->packet_buffer[et].sample_type = sample_type;
	packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
	packet_queue->packet_buffer[et].cpu = cpu;
	packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
	packet_queue->packet_buffer[et].instr_count = 0;
	packet_queue->packet_buffer[et].last_instr_taken_branch = false;
	packet_queue->packet_buffer[et].last_instr_size = 0;
	packet_queue->packet_buffer[et].last_instr_type = 0;
	packet_queue->packet_buffer[et].last_instr_subtype = 0;
	packet_queue->packet_buffer[et].last_instr_cond = 0;
	packet_queue->packet_buffer[et].flags = 0;
	packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
	packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;

	if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
		return OCSD_RESP_WAIT;

	return OCSD_RESP_CONT;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
			     struct cs_etm_packet_queue *packet_queue,
			     const ocsd_generic_trace_elem *elem,
			     const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
					    CS_ETM_RANGE);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &packet_queue->packet_buffer[packet_queue->tail];

	switch (elem->isa) {
	case ocsd_isa_aarch64:
		packet->isa = CS_ETM_ISA_A64;
		break;
	case ocsd_isa_arm:
		packet->isa = CS_ETM_ISA_A32;
		break;
	case ocsd_isa_thumb2:
		packet->isa = CS_ETM_ISA_T32;
		break;
	case ocsd_isa_tee:
	case ocsd_isa_jazelle:
	case ocsd_isa_custom:
	case ocsd_isa_unknown:
	default:
		packet->isa = CS_ETM_ISA_UNKNOWN;
	}

	packet->start_addr = elem->st_addr;
	packet->end_addr = elem->en_addr;
	packet->instr_count = elem->num_instr_range;
	packet->last_instr_type = elem->last_i_type;
	packet->last_instr_subtype = elem->last_i_subtype;
	packet->last_instr_cond = elem->last_instr_cond;

	if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
		packet->last_instr_taken_branch = elem->last_instr_exec;
	else
		packet->last_instr_taken_branch = false;

	packet->last_instr_size = elem->last_instr_sz;

	/* per-thread scenario, no need to generate a timestamp */
	if (cs_etm__etmq_is_timeless(etmq))
		goto out;

	/*
	 * The packet queue is full and we haven't seen a timestamp (had we
	 * seen one the packet queue wouldn't be full).  Let the front end
	 * deal with it.
	 */
	if (ret == OCSD_RESP_WAIT)
		goto out;

	packet_queue->instr_count += elem->num_instr_range;
	/* Tell the front end we have a new timestamp to process */
	ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
						trace_chan_id);
out:
	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	/*
	 * Something happened and who knows when we'll get new traces so
	 * reset time statistics.
	 */
	cs_etm_decoder__reset_timestamp(queue);
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_DISCONTINUITY);
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
				 const ocsd_generic_trace_elem *elem,
				 const uint8_t trace_chan_id)
{
	int ret = 0;
	struct cs_etm_packet *packet;

	ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					    CS_ETM_EXCEPTION);
	if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
		return ret;

	packet = &queue->packet_buffer[queue->tail];
	packet->exception_number = elem->exception_number;

	return ret;
}

static ocsd_datapath_resp_t
cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
				     const uint8_t trace_chan_id)
{
	return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
					     CS_ETM_EXCEPTION_RET);
}

static ocsd_datapath_resp_t
cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
			struct cs_etm_packet_queue *packet_queue,
			const ocsd_generic_trace_elem *elem,
			const uint8_t trace_chan_id)
{
	pid_t tid = -1;
	static u64 pid_fmt;
	int ret;

	/*
	 * As all the ETMs run at the same exception level, the system should
	 * have the same PID format across CPUs.  So cache the PID format and
	 * reuse it for subsequent decoding.
	 */
	if (!pid_fmt) {
		ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
		if (ret)
			return OCSD_RESP_FATAL_SYS_ERR;
	}

	/*
	 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
	 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
	 * as VMID; bit ETM_OPT_CTXTID2 is set in this case.
	 */
	switch (pid_fmt) {
	case BIT(ETM_OPT_CTXTID):
		if (elem->context.ctxt_id_valid)
			tid = elem->context.context_id;
		break;
	case BIT(ETM_OPT_CTXTID2):
		if (elem->context.vmid_valid)
			tid = elem->context.vmid;
		break;
	default:
		break;
	}

	if (tid == -1)
		return OCSD_RESP_CONT;

	if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
		return OCSD_RESP_FATAL_SYS_ERR;

	/*
	 * A timestamp is generated after a PE_CONTEXT element, so make sure
	 * to rely on the upcoming one.
	 */
	cs_etm_decoder__reset_timestamp(packet_queue);

	return OCSD_RESP_CONT;
}
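
/*
 * Callback for generic trace elements, registered with the decode tree via
 * ocsd_dt_set_gen_elem_outfn() below.  Each element is dispatched to the
 * matching helper above, using the packet queue associated with the trace
 * channel ID.
 */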
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
				const void *context,
				const ocsd_trc_index_t indx __maybe_unused,
				const u8 trace_chan_id __maybe_unused,
				const ocsd_generic_trace_elem *elem)
{
	ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
	struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
	struct cs_etm_queue *etmq = decoder->data;
	struct cs_etm_packet_queue *packet_queue;

	/* First get the packet queue for this traceID */
	packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
	if (!packet_queue)
		return OCSD_RESP_FATAL_SYS_ERR;

	switch (elem->elem_type) {
	case OCSD_GEN_TRC_ELEM_UNKNOWN:
		break;
	case OCSD_GEN_TRC_ELEM_EO_TRACE:
	case OCSD_GEN_TRC_ELEM_NO_SYNC:
	case OCSD_GEN_TRC_ELEM_TRACE_ON:
		resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
		resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
						    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION:
		resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
							trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
		resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
							    trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_TIMESTAMP:
		resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
							 trace_chan_id);
		break;
	case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
		resp = cs_etm_decoder__set_tid(etmq, packet_queue,
					       elem, trace_chan_id);
		break;
	/* Unused packet types */
	case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
	case OCSD_GEN_TRC_ELEM_ADDR_NACC:
	case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
	case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
	case OCSD_GEN_TRC_ELEM_EVENT:
	case OCSD_GEN_TRC_ELEM_SWTRACE:
	case OCSD_GEN_TRC_ELEM_CUSTOM:
	case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
	case OCSD_GEN_TRC_ELEM_MEMTRANS:
	default:
		break;
	}

	return resp;
}
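
/*
 * Counterpart to cs_etm_decoder__create_etm_packet_printer(): instead of a
 * packet processor that only prints packets (the CS_ETM_OPERATION_PRINT
 * path), this creates a full decoder (OCSD_CREATE_FLG_FULL_DECODER) and
 * routes the resulting generic trace elements to
 * cs_etm_decoder__gen_trace_elem_printer() above.
 */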
static int cs_etm_decoder__create_etm_packet_decoder(
					struct cs_etm_trace_params *t_params,
					struct cs_etm_decoder *decoder)
{
	const char *decoder_name;
	ocsd_etmv3_cfg config_etmv3;
	ocsd_etmv4_cfg trace_config_etmv4;
	void *trace_config;
	u8 csid;

	switch (t_params->protocol) {
	case CS_ETM_PROTO_ETMV3:
	case CS_ETM_PROTO_PTM:
		cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
		decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
							OCSD_BUILTIN_DCD_ETMV3 :
							OCSD_BUILTIN_DCD_PTM;
		trace_config = &config_etmv3;
		break;
	case CS_ETM_PROTO_ETMV4i:
		cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
		decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
		trace_config = &trace_config_etmv4;
		break;
	default:
		return -1;
	}

	if (ocsd_dt_create_decoder(decoder->dcd_tree,
				   decoder_name,
				   OCSD_CREATE_FLG_FULL_DECODER,
				   trace_config, &csid))
		return -1;

	if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
				       cs_etm_decoder__gen_trace_elem_printer,
				       decoder))
		return -1;

	return 0;
}

static int
cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
				   struct cs_etm_trace_params *t_params,
				   struct cs_etm_decoder *decoder)
{
	if (d_params->operation == CS_ETM_OPERATION_PRINT)
		return cs_etm_decoder__create_etm_packet_printer(t_params,
								 decoder);
	else if (d_params->operation == CS_ETM_OPERATION_DECODE)
		return cs_etm_decoder__create_etm_packet_decoder(t_params,
								 decoder);

	return -1;
}
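
/*
 * Sketch of the expected decoder life cycle, based on the API exported from
 * this file (simplified; error handling omitted):
 *
 *	decoder = cs_etm_decoder__new(num_cpu, &d_params, t_params);
 *	cs_etm_decoder__add_mem_access_cb(decoder, start, end, cb_func);
 *	while (<more trace data>)
 *		cs_etm_decoder__process_data_block(decoder, offset, buf,
 *						   len, &consumed);
 *	cs_etm_decoder__free(decoder);
 */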
struct cs_etm_decoder *
cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
		    struct cs_etm_trace_params t_params[])
{
	struct cs_etm_decoder *decoder;
	ocsd_dcd_tree_src_t format;
	u32 flags;
	int i, ret;

	if ((!t_params) || (!d_params))
		return NULL;

	decoder = zalloc(sizeof(*decoder));

	if (!decoder)
		return NULL;

	decoder->data = d_params->data;
	decoder->prev_return = OCSD_RESP_CONT;
	format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
					OCSD_TRC_SRC_SINGLE);
	flags = 0;
	flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
	flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
	flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);

	/*
	 * Drivers may add barrier frames when used with perf; set up to
	 * handle this.  Barriers consist of a FSYNC packet repeated 4 times.
	 */
	flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;

	/* Create decode tree for the data source */
	decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);

	if (decoder->dcd_tree == 0)
		goto err_free_decoder;

	/* init library print logging support */
	ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
	if (ret != 0)
		goto err_free_decoder;

	/* init raw frame logging if required */
	cs_etm_decoder__init_raw_frame_logging(d_params, decoder);

	for (i = 0; i < num_cpu; i++) {
		ret = cs_etm_decoder__create_etm_decoder(d_params,
							 &t_params[i],
							 decoder);
		if (ret != 0)
			goto err_free_decoder;
	}

	return decoder;

err_free_decoder:
	cs_etm_decoder__free(decoder);
	return NULL;
}

int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
				       u64 indx, const u8 *buf,
				       size_t len, size_t *consumed)
{
	int ret = 0;
	ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
	ocsd_datapath_resp_t prev_return = decoder->prev_return;
	size_t processed = 0;
	u32 count;

	while (processed < len) {
		if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_FLUSH,
						   0,
						   0,
						   NULL,
						   NULL);
		} else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
			cur = ocsd_dt_process_data(decoder->dcd_tree,
						   OCSD_OP_DATA,
						   indx + processed,
						   len - processed,
						   &buf[processed],
						   &count);
			processed += count;
		} else {
			ret = -EINVAL;
			break;
		}

		/*
		 * Return to the input code if the packet buffer is full.
		 * Flushing will get done once the packet buffer has been
		 * processed.
		 */
		if (OCSD_DATA_RESP_IS_WAIT(cur))
			break;

		prev_return = cur;
	}

	decoder->prev_return = cur;
	*consumed = processed;

	return ret;
}

void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
{
	if (!decoder)
		return;

	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;
	free(decoder);
}