tools/perf/util/cs-etm.c: CoreSight ETM trace decode (matched lines, shown with file line numbers and containing functions)
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015-2018 Linaro Limited.
12 #include <linux/coresight-pmu.h>
22 #include "cs-etm.h"
23 #include "cs-etm-decoder/cs-etm-decoder.h"
37 #include "thread-stack.h"
40 #include "util/synthetic-events.h"
61 * Per-thread ignores the trace channel ID and instead assumes that
154 return -EINVAL; in cs_etm__get_magic()
156 metadata = inode->priv; in cs_etm__get_magic()
168 return -EINVAL; in cs_etm__get_cpu()
170 metadata = inode->priv; in cs_etm__get_cpu()
188 * The result is cached in etm->pid_fmt so this function only needs to be called
215 return etmq->etm->pid_fmt; in cs_etm__get_pid_fmt()
227 return -ENOMEM; in cs_etm__map_trace_id()
233 if (inode->priv) in cs_etm__map_trace_id()
234 return -EINVAL; in cs_etm__map_trace_id()
237 inode->priv = cpu_metadata; in cs_etm__map_trace_id()
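/*
 * A minimal sketch of the pattern used by cs_etm__map_trace_id(): perf's
 * intlist doubles as a sparse map from an 8-bit trace ID to that CPU's
 * metadata block. Assumes perf's util/intlist.h API (intlist__findnew()
 * finds or allocates a node keyed by an int; node->priv is caller-owned):
 */
#include <errno.h>

static int map_id_to_data(struct intlist *map, int trace_id, void *data)
{
	struct int_node *inode = intlist__findnew(map, trace_id);

	if (!inode)
		return -ENOMEM;		/* node allocation failed */
	if (inode->priv)
		return -EINVAL;		/* trace ID already claimed */
	inode->priv = data;		/* publish the mapping */
	return 0;
}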
257 return -EINVAL; in cs_etm__metadata_get_trace_id()
280 return -EINVAL; in cs_etm__metadata_set_trace_id()
294 for (i = 0; i < etm->num_cpu; i++) { in get_cpu_data()
295 if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) { in get_cpu_data()
296 metadata = etm->metadata[i]; in get_cpu_data()
324 hw_id = event->aux_output_hw_id.hw_id; in cs_etm__process_aux_output_hw_id()
330 return -EINVAL; in cs_etm__process_aux_output_hw_id()
333 etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace); in cs_etm__process_aux_output_hw_id()
334 if (!etm || !etm->metadata) in cs_etm__process_aux_output_hw_id()
335 return -EINVAL; in cs_etm__process_aux_output_hw_id()
338 evsel = evlist__event2evsel(session->evlist, event); in cs_etm__process_aux_output_hw_id()
340 return -EINVAL; in cs_etm__process_aux_output_hw_id()
345 if (cpu == -1) { in cs_etm__process_aux_output_hw_id()
346 /* no CPU in the sample - possibly recorded with an old version of perf */ in cs_etm__process_aux_output_hw_id()
348 return -EINVAL; in cs_etm__process_aux_output_hw_id()
354 cpu_data = inode->priv; in cs_etm__process_aux_output_hw_id()
357 return -EINVAL; in cs_etm__process_aux_output_hw_id()
366 return -EINVAL; in cs_etm__process_aux_output_hw_id()
369 /* mapped and matched - return OK */ in cs_etm__process_aux_output_hw_id()
377 /* not one we've seen before - let's map it */ in cs_etm__process_aux_output_hw_id()
400 etmq->pending_timestamp_chan_id = trace_chan_id; in cs_etm__etmq_set_traceid_queue_timestamp()
408 if (!etmq->pending_timestamp_chan_id) in cs_etm__etmq_get_timestamp()
412 *trace_chan_id = etmq->pending_timestamp_chan_id; in cs_etm__etmq_get_timestamp()
415 etmq->pending_timestamp_chan_id); in cs_etm__etmq_get_timestamp()
420 etmq->pending_timestamp_chan_id = 0; in cs_etm__etmq_get_timestamp()
423 return packet_queue->cs_timestamp; in cs_etm__etmq_get_timestamp()
430 queue->head = 0; in cs_etm__clear_packet_queue()
431 queue->tail = 0; in cs_etm__clear_packet_queue()
432 queue->packet_count = 0; in cs_etm__clear_packet_queue()
434 queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN; in cs_etm__clear_packet_queue()
435 queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR; in cs_etm__clear_packet_queue()
436 queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR; in cs_etm__clear_packet_queue()
437 queue->packet_buffer[i].instr_count = 0; in cs_etm__clear_packet_queue()
438 queue->packet_buffer[i].last_instr_taken_branch = false; in cs_etm__clear_packet_queue()
439 queue->packet_buffer[i].last_instr_size = 0; in cs_etm__clear_packet_queue()
440 queue->packet_buffer[i].last_instr_type = 0; in cs_etm__clear_packet_queue()
441 queue->packet_buffer[i].last_instr_subtype = 0; in cs_etm__clear_packet_queue()
442 queue->packet_buffer[i].last_instr_cond = 0; in cs_etm__clear_packet_queue()
443 queue->packet_buffer[i].flags = 0; in cs_etm__clear_packet_queue()
444 queue->packet_buffer[i].exception_number = UINT32_MAX; in cs_etm__clear_packet_queue()
445 queue->packet_buffer[i].trace_chan_id = UINT8_MAX; in cs_etm__clear_packet_queue()
446 queue->packet_buffer[i].cpu = INT_MIN; in cs_etm__clear_packet_queue()
455 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__clear_all_packet_queues()
458 idx = (int)(intptr_t)inode->priv; in cs_etm__clear_all_packet_queues()
459 tidq = etmq->traceid_queues[idx]; in cs_etm__clear_all_packet_queues()
460 cs_etm__clear_packet_queue(&tidq->packet_queue); in cs_etm__clear_all_packet_queues()
468 int rc = -ENOMEM; in cs_etm__init_traceid_queue()
470 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__init_traceid_queue()
472 cs_etm__clear_packet_queue(&tidq->packet_queue); in cs_etm__init_traceid_queue()
474 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__init_traceid_queue()
475 tidq->trace_chan_id = trace_chan_id; in cs_etm__init_traceid_queue()
476 tidq->el = tidq->prev_packet_el = ocsd_EL_unknown; in cs_etm__init_traceid_queue()
477 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1, in cs_etm__init_traceid_queue()
478 queue->tid); in cs_etm__init_traceid_queue()
479 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host); in cs_etm__init_traceid_queue()
481 tidq->packet = zalloc(sizeof(struct cs_etm_packet)); in cs_etm__init_traceid_queue()
482 if (!tidq->packet) in cs_etm__init_traceid_queue()
485 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet)); in cs_etm__init_traceid_queue()
486 if (!tidq->prev_packet) in cs_etm__init_traceid_queue()
489 if (etm->synth_opts.last_branch) { in cs_etm__init_traceid_queue()
492 sz += etm->synth_opts.last_branch_sz * in cs_etm__init_traceid_queue()
494 tidq->last_branch = zalloc(sz); in cs_etm__init_traceid_queue()
495 if (!tidq->last_branch) in cs_etm__init_traceid_queue()
497 tidq->last_branch_rb = zalloc(sz); in cs_etm__init_traceid_queue()
498 if (!tidq->last_branch_rb) in cs_etm__init_traceid_queue()
502 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); in cs_etm__init_traceid_queue()
503 if (!tidq->event_buf) in cs_etm__init_traceid_queue()
509 zfree(&tidq->last_branch_rb); in cs_etm__init_traceid_queue()
510 zfree(&tidq->last_branch); in cs_etm__init_traceid_queue()
511 zfree(&tidq->prev_packet); in cs_etm__init_traceid_queue()
512 zfree(&tidq->packet); in cs_etm__init_traceid_queue()
524 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__etmq_get_traceid_queue()
526 if (etm->per_thread_decoding) in cs_etm__etmq_get_traceid_queue()
529 traceid_queues_list = etmq->traceid_queues_list; in cs_etm__etmq_get_traceid_queue()
537 idx = (int)(intptr_t)inode->priv; in cs_etm__etmq_get_traceid_queue()
538 return etmq->traceid_queues[idx]; in cs_etm__etmq_get_traceid_queue()
556 inode->priv = (void *)(intptr_t)idx; in cs_etm__etmq_get_traceid_queue()
562 traceid_queues = etmq->traceid_queues; in cs_etm__etmq_get_traceid_queue()
575 etmq->traceid_queues = traceid_queues; in cs_etm__etmq_get_traceid_queue()
577 return etmq->traceid_queues[idx]; in cs_etm__etmq_get_traceid_queue()
597 return &tidq->packet_queue; in cs_etm__etmq_get_packet_queue()
607 if (etm->synth_opts.branches || etm->synth_opts.last_branch || in cs_etm__packet_swap()
608 etm->synth_opts.instructions) { in cs_etm__packet_swap()
621 tmp = tidq->packet; in cs_etm__packet_swap()
622 tidq->packet = tidq->prev_packet; in cs_etm__packet_swap()
623 tidq->prev_packet = tmp; in cs_etm__packet_swap()
624 tidq->prev_packet_el = tidq->el; in cs_etm__packet_swap()
625 thread__put(tidq->prev_packet_thread); in cs_etm__packet_swap()
626 tidq->prev_packet_thread = thread__get(tidq->thread); in cs_etm__packet_swap()
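/*
 * cs_etm__packet_swap() never copies packet payloads: it swaps the
 * "current" and "previous" pointers once per decoded packet, so
 * prev_packet always holds the packet just consumed while its old
 * storage is recycled. A self-contained sketch of the idiom (pkt
 * stands in for struct cs_etm_packet):
 */
struct pkt { unsigned long long start_addr, end_addr; };

static void pkt_swap(struct pkt **packet, struct pkt **prev_packet)
{
	struct pkt *tmp = *packet;

	*packet = *prev_packet;		/* old storage is reused for the next packet */
	*prev_packet = tmp;		/* the packet just decoded is now "previous" */
}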
635 if (len && (pkt_string[len-1] == '\n')) in cs_etm__packet_dump()
647 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv3()
657 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv4()
671 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_ete()
692 architecture = etm->metadata[i][CS_ETM_MAGIC]; in cs_etm__init_trace_params()
696 etmidr = etm->metadata[i][CS_ETM_ETMIDR]; in cs_etm__init_trace_params()
706 return -EINVAL; in cs_etm__init_trace_params()
718 int ret = -EINVAL; in cs_etm__init_decoder_params()
723 d_params->packet_printer = cs_etm__packet_dump; in cs_etm__init_decoder_params()
724 d_params->operation = mode; in cs_etm__init_decoder_params()
725 d_params->data = etmq; in cs_etm__init_decoder_params()
726 d_params->formatted = formatted; in cs_etm__init_decoder_params()
727 d_params->fsyncs = false; in cs_etm__init_decoder_params()
728 d_params->hsyncs = false; in cs_etm__init_decoder_params()
729 d_params->frame_aligned = true; in cs_etm__init_decoder_params()
745 ". ... CoreSight %s Trace data: size %#zx bytes\n", in cs_etm__dump_event()
746 cs_etm_decoder__get_name(etmq->decoder), buffer->size); in cs_etm__dump_event()
752 etmq->decoder, buffer->offset, in cs_etm__dump_event()
753 &((u8 *)buffer->data)[buffer_used], in cs_etm__dump_event()
754 buffer->size - buffer_used, &consumed); in cs_etm__dump_event()
759 } while (buffer_used < buffer->size); in cs_etm__dump_event()
761 cs_etm_decoder__reset(etmq->decoder); in cs_etm__dump_event()
767 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__flush_events()
773 if (!tool->ordered_events) in cs_etm__flush_events()
774 return -EINVAL; in cs_etm__flush_events()
776 if (etm->timeless_decoding) { in cs_etm__flush_events()
778 * Pass tid = -1 to process all queues. But likely they will have in cs_etm__flush_events()
781 return cs_etm__process_timeless_queues(etm, -1); in cs_etm__flush_events()
793 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__free_traceid_queues()
796 priv = (uintptr_t)inode->priv; in cs_etm__free_traceid_queues()
800 tidq = etmq->traceid_queues[idx]; in cs_etm__free_traceid_queues()
801 thread__zput(tidq->thread); in cs_etm__free_traceid_queues()
802 thread__zput(tidq->prev_packet_thread); in cs_etm__free_traceid_queues()
803 zfree(&tidq->event_buf); in cs_etm__free_traceid_queues()
804 zfree(&tidq->last_branch); in cs_etm__free_traceid_queues()
805 zfree(&tidq->last_branch_rb); in cs_etm__free_traceid_queues()
806 zfree(&tidq->prev_packet); in cs_etm__free_traceid_queues()
807 zfree(&tidq->packet); in cs_etm__free_traceid_queues()
819 etmq->traceid_queues_list = NULL; in cs_etm__free_traceid_queues()
822 zfree(&etmq->traceid_queues); in cs_etm__free_traceid_queues()
832 cs_etm_decoder__free(etmq->decoder); in cs_etm__free_queue()
840 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__free_events()
843 struct auxtrace_queues *queues = &aux->queues; in cs_etm__free_events()
845 for (i = 0; i < queues->nr_queues; i++) { in cs_etm__free_events()
846 cs_etm__free_queue(queues->queue_array[i].priv); in cs_etm__free_events()
847 queues->queue_array[i].priv = NULL; in cs_etm__free_events()
857 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__free()
861 session->auxtrace = NULL; in cs_etm__free()
869 for (i = 0; i < aux->num_cpu; i++) in cs_etm__free()
870 zfree(&aux->metadata[i]); in cs_etm__free()
872 zfree(&aux->metadata); in cs_etm__free()
879 struct cs_etm_auxtrace *aux = container_of(session->auxtrace, in cs_etm__evsel_is_auxtrace()
883 return evsel->core.attr.type == aux->pmu_type; in cs_etm__evsel_is_auxtrace()
896 return &etmq->etm->session->machines.host; in cs_etm__get_machine()
908 return machines__find_guest(&etmq->etm->session->machines, in cs_etm__get_machine()
915 return &etmq->etm->session->machines.host; in cs_etm__get_machine()
974 assert(tidq->el == ocsd_EL1 || tidq->el == ocsd_EL0); in cs_etm__mem_access()
976 assert(tidq->el == ocsd_EL2); in cs_etm__mem_access()
978 assert(tidq->el == ocsd_EL3); in cs_etm__mem_access()
981 cpumode = cs_etm__cpu_mode(etmq, address, tidq->el); in cs_etm__mem_access()
983 if (!thread__find_map(tidq->thread, cpumode, address, &al)) in cs_etm__mem_access()
990 if (dso->data.status == DSO_DATA_STATUS_ERROR && in cs_etm__mem_access()
998 len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)), in cs_etm__mem_access()
1003 …" Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n"… in cs_etm__mem_access()
1004 if (!dso->auxtrace_warned) { in cs_etm__mem_access()
1007 dso->long_name ? dso->long_name : "Unknown"); in cs_etm__mem_access()
1008 dso->auxtrace_warned = true; in cs_etm__mem_access()
1028 int decoders = formatted ? etm->num_cpu : 1; in cs_etm__alloc_queue()
1034 etmq->traceid_queues_list = intlist__new(NULL); in cs_etm__alloc_queue()
1035 if (!etmq->traceid_queues_list) in cs_etm__alloc_queue()
1054 etmq->decoder = cs_etm_decoder__new(decoders, &d_params, in cs_etm__alloc_queue()
1057 if (!etmq->decoder) in cs_etm__alloc_queue()
1064 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder, in cs_etm__alloc_queue()
1065 0x0L, ((u64) -1L), in cs_etm__alloc_queue()
1073 cs_etm_decoder__free(etmq->decoder); in cs_etm__alloc_queue()
1075 intlist__delete(etmq->traceid_queues_list); in cs_etm__alloc_queue()
1086 struct cs_etm_queue *etmq = queue->priv; in cs_etm__setup_queue()
1088 if (list_empty(&queue->head) || etmq) in cs_etm__setup_queue()
1094 return -ENOMEM; in cs_etm__setup_queue()
1096 queue->priv = etmq; in cs_etm__setup_queue()
1097 etmq->etm = etm; in cs_etm__setup_queue()
1098 etmq->queue_nr = queue_nr; in cs_etm__setup_queue()
1099 etmq->offset = 0; in cs_etm__setup_queue()
1114 * We are under a CPU-wide trace scenario. As such we need to know in cs_etm__queue_first_cs_timestamp()
1170 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp); in cs_etm__queue_first_cs_timestamp()
1179 struct branch_stack *bs_src = tidq->last_branch_rb; in cs_etm__copy_last_branch_rb()
1180 struct branch_stack *bs_dst = tidq->last_branch; in cs_etm__copy_last_branch_rb()
1184 * Set the number of records before early exit: ->nr is used to in cs_etm__copy_last_branch_rb()
1185 * determine how many branches to copy from ->entries. in cs_etm__copy_last_branch_rb()
1187 bs_dst->nr = bs_src->nr; in cs_etm__copy_last_branch_rb()
1192 if (!bs_src->nr) in cs_etm__copy_last_branch_rb()
1196 * As bs_src->entries is a circular buffer, we need to copy from it in in cs_etm__copy_last_branch_rb()
1198 * branch ->last_branch_pos until the end of bs_src->entries buffer. in cs_etm__copy_last_branch_rb()
1200 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos; in cs_etm__copy_last_branch_rb()
1201 memcpy(&bs_dst->entries[0], in cs_etm__copy_last_branch_rb()
1202 &bs_src->entries[tidq->last_branch_pos], in cs_etm__copy_last_branch_rb()
1207 * of the bs_src->entries buffer and until the ->last_branch_pos element in cs_etm__copy_last_branch_rb()
1212 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) { in cs_etm__copy_last_branch_rb()
1213 memcpy(&bs_dst->entries[nr], in cs_etm__copy_last_branch_rb()
1214 &bs_src->entries[0], in cs_etm__copy_last_branch_rb()
1215 sizeof(struct branch_entry) * tidq->last_branch_pos); in cs_etm__copy_last_branch_rb()
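/*
 * The last-branch ring stores the newest record at last_branch_pos and
 * wraps; cs_etm__copy_last_branch_rb() linearises it newest-first with
 * at most two memcpy() calls. A self-contained sketch (sz plays the
 * role of synth_opts.last_branch_sz):
 */
#include <string.h>

struct br_entry { unsigned long long from, to; };

static void ring_to_linear(struct br_entry *dst, const struct br_entry *src,
			   unsigned int pos, unsigned int nr, unsigned int sz)
{
	unsigned int n = sz - pos;	/* newest entry up to the end of the ring */

	memcpy(dst, &src[pos], sizeof(*dst) * n);

	/* Wrap to the start of the ring only once it has filled up. */
	if (nr >= sz)
		memcpy(&dst[n], &src[0], sizeof(*dst) * pos);
}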
1222 tidq->last_branch_pos = 0; in cs_etm__reset_last_branch_rb()
1223 tidq->last_branch_rb->nr = 0; in cs_etm__reset_last_branch_rb()
1235 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111 in cs_etm__t32_instr_size()
1236 * denote a 32-bit instruction. in cs_etm__t32_instr_size()
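/*
 * Thumb-2 (T32) mixes 16- and 32-bit instructions; as the comment above
 * says, the width is encoded in bits [15:11] of the first halfword
 * (0b11101, 0b11110 and 0b11111 mean 32-bit). A self-contained check:
 */
#include <stdint.h>

static unsigned int t32_size(uint16_t first_halfword)
{
	/* top three bits 0b111 and bits [12:11] not both zero => 32-bit */
	return ((first_halfword & 0xe000) == 0xe000 &&
		(first_halfword & 0x1800) != 0) ? 4 : 2;
}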
1244 if (packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__first_executed_instr()
1247 return packet->start_addr; in cs_etm__first_executed_instr()
1254 if (packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__last_executed_instr()
1257 return packet->end_addr - packet->last_instr_size; in cs_etm__last_executed_instr()
1265 if (packet->isa == CS_ETM_ISA_T32) { in cs_etm__instr_addr()
1266 u64 addr = packet->start_addr; in cs_etm__instr_addr()
1271 offset--; in cs_etm__instr_addr()
1277 return packet->start_addr + offset * 4; in cs_etm__instr_addr()
1283 struct branch_stack *bs = tidq->last_branch_rb; in cs_etm__update_last_branch_rb()
1292 if (!tidq->last_branch_pos) in cs_etm__update_last_branch_rb()
1293 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz; in cs_etm__update_last_branch_rb()
1295 tidq->last_branch_pos -= 1; in cs_etm__update_last_branch_rb()
1297 be = &bs->entries[tidq->last_branch_pos]; in cs_etm__update_last_branch_rb()
1298 be->from = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__update_last_branch_rb()
1299 be->to = cs_etm__first_executed_instr(tidq->packet); in cs_etm__update_last_branch_rb()
1301 be->flags.mispred = 0; in cs_etm__update_last_branch_rb()
1302 be->flags.predicted = 1; in cs_etm__update_last_branch_rb()
1305 * Increment bs->nr until reaching the number of last branches asked by in cs_etm__update_last_branch_rb()
1308 if (bs->nr < etmq->etm->synth_opts.last_branch_sz) in cs_etm__update_last_branch_rb()
1309 bs->nr += 1; in cs_etm__update_last_branch_rb()
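/*
 * cs_etm__update_last_branch_rb() fills the ring newest-first: the
 * write position moves down and wraps from slot 0 back to the top,
 * while nr saturates at the configured depth. Self-contained form:
 */
struct ring { unsigned int pos, nr, sz; };

static unsigned int ring_push(struct ring *r)
{
	if (!r->pos)
		r->pos = r->sz;		/* wrap from slot 0 back to the top */
	r->pos -= 1;

	if (r->nr < r->sz)		/* saturate at the requested depth */
		r->nr += 1;

	return r->pos;			/* caller writes the new entry here */
}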
1315 event->header.size = perf_event__sample_event_size(sample, type, 0); in cs_etm__inject_event()
1323 struct auxtrace_buffer *aux_buffer = etmq->buffer; in cs_etm__get_trace()
1327 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__get_trace()
1335 etmq->buf_len = 0; in cs_etm__get_trace()
1339 etmq->buffer = aux_buffer; in cs_etm__get_trace()
1342 if (!aux_buffer->data) { in cs_etm__get_trace()
1344 int fd = perf_data__fd(etmq->etm->session->data); in cs_etm__get_trace()
1346 aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd); in cs_etm__get_trace()
1347 if (!aux_buffer->data) in cs_etm__get_trace()
1348 return -ENOMEM; in cs_etm__get_trace()
1355 etmq->buf_used = 0; in cs_etm__get_trace()
1356 etmq->buf_len = aux_buffer->size; in cs_etm__get_trace()
1357 etmq->buf = aux_buffer->data; in cs_etm__get_trace()
1359 return etmq->buf_len; in cs_etm__get_trace()
1368 if (tid != -1) { in cs_etm__set_thread()
1369 thread__zput(tidq->thread); in cs_etm__set_thread()
1370 tidq->thread = machine__find_thread(machine, -1, tid); in cs_etm__set_thread()
1374 if (!tidq->thread) in cs_etm__set_thread()
1375 tidq->thread = machine__idle_thread(machine); in cs_etm__set_thread()
1377 tidq->el = el; in cs_etm__set_thread()
1387 return -EINVAL; in cs_etm__etmq_set_tid_el()
1395 return !!etmq->etm->timeless_decoding; in cs_etm__etmq_is_timeless()
1407 if (packet->sample_type == CS_ETM_DISCONTINUITY) { in cs_etm__copy_insn()
1408 sample->insn_len = 0; in cs_etm__copy_insn()
1413 * T32 instruction size might be 32-bit or 16-bit; decide by calling in cs_etm__copy_insn()
1416 if (packet->isa == CS_ETM_ISA_T32) in cs_etm__copy_insn()
1417 sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id, in cs_etm__copy_insn()
1418 sample->ip); in cs_etm__copy_insn()
1419 /* Otherwise, A64 and A32 instruction sizes are always 32-bit. */ in cs_etm__copy_insn()
1421 sample->insn_len = 4; in cs_etm__copy_insn()
1423 cs_etm__mem_access(etmq, trace_chan_id, sample->ip, sample->insn_len, in cs_etm__copy_insn()
1424 (void *)sample->insn, 0); in cs_etm__copy_insn()
1429 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__convert_sample_time()
1431 if (etm->has_virtual_ts) in cs_etm__convert_sample_time()
1432 return tsc_to_perf_time(cs_timestamp, &etm->tc); in cs_etm__convert_sample_time()
1440 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__resolve_sample_time()
1441 struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue; in cs_etm__resolve_sample_time()
1443 if (!etm->timeless_decoding && etm->has_virtual_ts) in cs_etm__resolve_sample_time()
1444 return packet_queue->cs_timestamp; in cs_etm__resolve_sample_time()
1446 return etm->latest_kernel_timestamp; in cs_etm__resolve_sample_time()
1454 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_instruction_sample()
1455 union perf_event *event = tidq->event_buf; in cs_etm__synth_instruction_sample()
1458 event->sample.header.type = PERF_RECORD_SAMPLE; in cs_etm__synth_instruction_sample()
1459 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr, tidq->el); in cs_etm__synth_instruction_sample()
1460 event->sample.header.size = sizeof(struct perf_event_header); in cs_etm__synth_instruction_sample()
1466 sample.pid = thread__pid(tidq->thread); in cs_etm__synth_instruction_sample()
1467 sample.tid = thread__tid(tidq->thread); in cs_etm__synth_instruction_sample()
1468 sample.id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1469 sample.stream_id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1471 sample.cpu = tidq->packet->cpu; in cs_etm__synth_instruction_sample()
1472 sample.flags = tidq->prev_packet->flags; in cs_etm__synth_instruction_sample()
1473 sample.cpumode = event->sample.header.misc; in cs_etm__synth_instruction_sample()
1475 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample); in cs_etm__synth_instruction_sample()
1477 if (etm->synth_opts.last_branch) in cs_etm__synth_instruction_sample()
1478 sample.branch_stack = tidq->last_branch; in cs_etm__synth_instruction_sample()
1480 if (etm->synth_opts.inject) { in cs_etm__synth_instruction_sample()
1482 etm->instructions_sample_type); in cs_etm__synth_instruction_sample()
1487 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_instruction_sample()
1505 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_branch_sample()
1507 union perf_event *event = tidq->event_buf; in cs_etm__synth_branch_sample()
1515 ip = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__synth_branch_sample()
1517 event->sample.header.type = PERF_RECORD_SAMPLE; in cs_etm__synth_branch_sample()
1518 event->sample.header.misc = cs_etm__cpu_mode(etmq, ip, in cs_etm__synth_branch_sample()
1519 tidq->prev_packet_el); in cs_etm__synth_branch_sample()
1520 event->sample.header.size = sizeof(struct perf_event_header); in cs_etm__synth_branch_sample()
1526 sample.pid = thread__pid(tidq->prev_packet_thread); in cs_etm__synth_branch_sample()
1527 sample.tid = thread__tid(tidq->prev_packet_thread); in cs_etm__synth_branch_sample()
1528 sample.addr = cs_etm__first_executed_instr(tidq->packet); in cs_etm__synth_branch_sample()
1529 sample.id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1530 sample.stream_id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1532 sample.cpu = tidq->packet->cpu; in cs_etm__synth_branch_sample()
1533 sample.flags = tidq->prev_packet->flags; in cs_etm__synth_branch_sample()
1534 sample.cpumode = event->sample.header.misc; in cs_etm__synth_branch_sample()
1536 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet, in cs_etm__synth_branch_sample()
1542 if (etm->synth_opts.last_branch) { in cs_etm__synth_branch_sample()
1545 .hw_idx = -1ULL, in cs_etm__synth_branch_sample()
1554 if (etm->synth_opts.inject) { in cs_etm__synth_branch_sample()
1556 etm->branches_sample_type); in cs_etm__synth_branch_sample()
1561 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_branch_sample()
1584 return perf_session__deliver_synth_event(cs_etm_synth->session, in cs_etm__event_synth()
1603 struct evlist *evlist = session->evlist; in cs_etm__synth_events()
1611 if (evsel->core.attr.type == etm->pmu_type) { in cs_etm__synth_events()
1618 pr_debug("No selected events with CoreSight Trace data\n"); in cs_etm__synth_events()
1625 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; in cs_etm__synth_events()
1628 if (etm->timeless_decoding) in cs_etm__synth_events()
1633 attr.exclude_user = evsel->core.attr.exclude_user; in cs_etm__synth_events()
1634 attr.exclude_kernel = evsel->core.attr.exclude_kernel; in cs_etm__synth_events()
1635 attr.exclude_hv = evsel->core.attr.exclude_hv; in cs_etm__synth_events()
1636 attr.exclude_host = evsel->core.attr.exclude_host; in cs_etm__synth_events()
1637 attr.exclude_guest = evsel->core.attr.exclude_guest; in cs_etm__synth_events()
1638 attr.sample_id_all = evsel->core.attr.sample_id_all; in cs_etm__synth_events()
1639 attr.read_format = evsel->core.attr.read_format; in cs_etm__synth_events()
1642 id = evsel->core.id[0] + 1000000000; in cs_etm__synth_events()
1647 if (etm->synth_opts.branches) { in cs_etm__synth_events()
1654 etm->branches_sample_type = attr.sample_type; in cs_etm__synth_events()
1655 etm->branches_id = id; in cs_etm__synth_events()
1660 if (etm->synth_opts.last_branch) { in cs_etm__synth_events()
1670 if (etm->synth_opts.instructions) { in cs_etm__synth_events()
1672 attr.sample_period = etm->synth_opts.period; in cs_etm__synth_events()
1673 etm->instructions_sample_period = attr.sample_period; in cs_etm__synth_events()
1677 etm->instructions_sample_type = attr.sample_type; in cs_etm__synth_events()
1678 etm->instructions_id = id; in cs_etm__synth_events()
1688 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__sample()
1690 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__sample()
1694 instrs_prev = tidq->period_instructions; in cs_etm__sample()
1696 tidq->period_instructions += tidq->packet->instr_count; in cs_etm__sample()
1702 if (etm->synth_opts.last_branch && in cs_etm__sample()
1703 tidq->prev_packet->sample_type == CS_ETM_RANGE && in cs_etm__sample()
1704 tidq->prev_packet->last_instr_taken_branch) in cs_etm__sample()
1707 if (etm->synth_opts.instructions && in cs_etm__sample()
1708 tidq->period_instructions >= etm->instructions_sample_period) { in cs_etm__sample()
1722 * -------------------------------------------------- in cs_etm__sample()
1729 * \---------------- -----------------/ in cs_etm__sample()
1731 * tidq->packet->instr_count in cs_etm__sample()
1734 * every etm->instructions_sample_period instructions - as in cs_etm__sample()
1739 * tidq->packet->instr_count represents the number of in cs_etm__sample()
1745 * etm->instructions_sample_period. in cs_etm__sample()
1753 * to tidq->period_instructions for next round calculation. in cs_etm__sample()
1759 * etm->instructions_sample_period. in cs_etm__sample()
1761 u64 offset = etm->instructions_sample_period - instrs_prev; in cs_etm__sample()
1765 if (etm->synth_opts.last_branch) in cs_etm__sample()
1768 while (tidq->period_instructions >= in cs_etm__sample()
1769 etm->instructions_sample_period) { in cs_etm__sample()
1771 * Calculate the address of the sampled instruction (-1 in cs_etm__sample()
1777 tidq->packet, offset - 1); in cs_etm__sample()
1780 etm->instructions_sample_period); in cs_etm__sample()
1784 offset += etm->instructions_sample_period; in cs_etm__sample()
1785 tidq->period_instructions -= in cs_etm__sample()
1786 etm->instructions_sample_period; in cs_etm__sample()
1790 if (etm->synth_opts.branches) { in cs_etm__sample()
1794 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__sample()
1798 if (tidq->prev_packet->sample_type == CS_ETM_RANGE && in cs_etm__sample()
1799 tidq->prev_packet->last_instr_taken_branch) in cs_etm__sample()
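/*
 * Instruction samples are emitted every instructions_sample_period
 * executed instructions, so one RANGE packet can produce several
 * samples and any remainder carries into the next packet. A
 * self-contained model of the arithmetic in cs_etm__sample()
 * (printf stands in for cs_etm__synth_instruction_sample()):
 */
#include <stdio.h>

typedef unsigned long long u64;

static void sample_packet(u64 *period_instructions, u64 instr_count, u64 period)
{
	u64 instrs_prev = *period_instructions;	/* carried in from the last packet */

	*period_instructions += instr_count;
	if (*period_instructions < period)
		return;				/* threshold not reached yet */

	/* 1-based offset of the first sampled instruction in this packet */
	u64 offset = period - instrs_prev;

	while (*period_instructions >= period) {
		printf("sample at packet offset %llu\n", offset - 1);
		offset += period;
		*period_instructions -= period;	/* remainder rolls forward */
	}
}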
1819 * to set 'prev_packet->last_instr_taken_branch' to true. This ensures in cs_etm__exception()
1823 * The exception packet includes the dummy address values, so don't in cs_etm__exception()
1827 if (tidq->prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__exception()
1828 tidq->prev_packet->last_instr_taken_branch = true; in cs_etm__exception()
1837 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__flush()
1840 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY) in cs_etm__flush()
1843 if (etmq->etm->synth_opts.last_branch && in cs_etm__flush()
1844 etmq->etm->synth_opts.instructions && in cs_etm__flush()
1845 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__flush()
1858 addr = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__flush()
1862 tidq->period_instructions); in cs_etm__flush()
1866 tidq->period_instructions = 0; in cs_etm__flush()
1870 if (etm->synth_opts.branches && in cs_etm__flush()
1871 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__flush()
1881 if (etm->synth_opts.last_branch) in cs_etm__flush()
1893 * It has no new packet coming and 'etmq->packet' contains the stale in cs_etm__end_block()
1901 if (etmq->etm->synth_opts.last_branch && in cs_etm__end_block()
1902 etmq->etm->synth_opts.instructions && in cs_etm__end_block()
1903 tidq->prev_packet->sample_type == CS_ETM_RANGE) { in cs_etm__end_block()
1913 addr = cs_etm__last_executed_instr(tidq->prev_packet); in cs_etm__end_block()
1917 tidq->period_instructions); in cs_etm__end_block()
1921 tidq->period_instructions = 0; in cs_etm__end_block()
1937 if (!etmq->buf_len) { in cs_etm__get_data_block()
1943 * are contiguous, reset the decoder to force re-sync. in cs_etm__get_data_block()
1945 ret = cs_etm_decoder__reset(etmq->decoder); in cs_etm__get_data_block()
1950 return etmq->buf_len; in cs_etm__get_data_block()
1962 switch (packet->isa) { in cs_etm__is_svc_instr()
1968 * +-----------------+--------+ in cs_etm__is_svc_instr()
1970 * +-----------------+--------+ in cs_etm__is_svc_instr()
1976 addr = end_addr - 2; in cs_etm__is_svc_instr()
1988 * +---------+---------+-------------------------+ in cs_etm__is_svc_instr()
1990 * +---------+---------+-------------------------+ in cs_etm__is_svc_instr()
1992 addr = end_addr - 4; in cs_etm__is_svc_instr()
2005 * +-----------------------+---------+-----------+ in cs_etm__is_svc_instr()
2007 * +-----------------------+---------+-----------+ in cs_etm__is_svc_instr()
2009 addr = end_addr - 4; in cs_etm__is_svc_instr()
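/*
 * cs_etm__is_svc_instr() reads back the instruction that sits just
 * before the exception address (end_addr - 2 for T32, end_addr - 4 for
 * A32/A64) and tests it against the SVC encodings from the Arm ARM, as
 * the diagrams above show. The three checks, self contained:
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_svc_t32(uint16_t instr)	/* T32: 0xDFxx */
{
	return (instr & 0xff00) == 0xdf00;
}

static bool is_svc_a32(uint32_t instr)	/* A32: bits [27:24] == 0b1111 */
{
	return (instr & 0x0f000000) == 0x0f000000;
}

static bool is_svc_a64(uint32_t instr)	/* A64: 0xd4000001 | imm16 << 5 */
{
	return (instr & 0xffe0001f) == 0xd4000001;
}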
2027 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__is_syscall()
2028 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_syscall()
2029 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__is_syscall()
2032 if (packet->exception_number == CS_ETMV3_EXC_SVC) in cs_etm__is_syscall()
2041 if (packet->exception_number == CS_ETMV4_EXC_CALL && in cs_etm__is_syscall()
2043 prev_packet->end_addr)) in cs_etm__is_syscall()
2053 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_async_exception()
2056 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT || in cs_etm__is_async_exception()
2057 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT || in cs_etm__is_async_exception()
2058 packet->exception_number == CS_ETMV3_EXC_PE_RESET || in cs_etm__is_async_exception()
2059 packet->exception_number == CS_ETMV3_EXC_IRQ || in cs_etm__is_async_exception()
2060 packet->exception_number == CS_ETMV3_EXC_FIQ) in cs_etm__is_async_exception()
2064 if (packet->exception_number == CS_ETMV4_EXC_RESET || in cs_etm__is_async_exception()
2065 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT || in cs_etm__is_async_exception()
2066 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR || in cs_etm__is_async_exception()
2067 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG || in cs_etm__is_async_exception()
2068 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG || in cs_etm__is_async_exception()
2069 packet->exception_number == CS_ETMV4_EXC_IRQ || in cs_etm__is_async_exception()
2070 packet->exception_number == CS_ETMV4_EXC_FIQ) in cs_etm__is_async_exception()
2080 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__is_sync_exception()
2081 struct cs_etm_packet *packet = tidq->packet; in cs_etm__is_sync_exception()
2082 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__is_sync_exception()
2085 if (packet->exception_number == CS_ETMV3_EXC_SMC || in cs_etm__is_sync_exception()
2086 packet->exception_number == CS_ETMV3_EXC_HYP || in cs_etm__is_sync_exception()
2087 packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE || in cs_etm__is_sync_exception()
2088 packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR || in cs_etm__is_sync_exception()
2089 packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT || in cs_etm__is_sync_exception()
2090 packet->exception_number == CS_ETMV3_EXC_DATA_FAULT || in cs_etm__is_sync_exception()
2091 packet->exception_number == CS_ETMV3_EXC_GENERIC) in cs_etm__is_sync_exception()
2095 if (packet->exception_number == CS_ETMV4_EXC_TRAP || in cs_etm__is_sync_exception()
2096 packet->exception_number == CS_ETMV4_EXC_ALIGNMENT || in cs_etm__is_sync_exception()
2097 packet->exception_number == CS_ETMV4_EXC_INST_FAULT || in cs_etm__is_sync_exception()
2098 packet->exception_number == CS_ETMV4_EXC_DATA_FAULT) in cs_etm__is_sync_exception()
2105 if (packet->exception_number == CS_ETMV4_EXC_CALL && in cs_etm__is_sync_exception()
2107 prev_packet->end_addr)) in cs_etm__is_sync_exception()
2117 if (packet->exception_number > CS_ETMV4_EXC_FIQ && in cs_etm__is_sync_exception()
2118 packet->exception_number <= CS_ETMV4_EXC_END) in cs_etm__is_sync_exception()
2128 struct cs_etm_packet *packet = tidq->packet; in cs_etm__set_sample_flags()
2129 struct cs_etm_packet *prev_packet = tidq->prev_packet; in cs_etm__set_sample_flags()
2130 u8 trace_chan_id = tidq->trace_chan_id; in cs_etm__set_sample_flags()
2134 switch (packet->sample_type) { in cs_etm__set_sample_flags()
2141 if (packet->last_instr_type == OCSD_INSTR_BR && in cs_etm__set_sample_flags()
2142 packet->last_instr_subtype == OCSD_S_INSTR_NONE) { in cs_etm__set_sample_flags()
2143 packet->flags = PERF_IP_FLAG_BRANCH; in cs_etm__set_sample_flags()
2145 if (packet->last_instr_cond) in cs_etm__set_sample_flags()
2146 packet->flags |= PERF_IP_FLAG_CONDITIONAL; in cs_etm__set_sample_flags()
2153 if (packet->last_instr_type == OCSD_INSTR_BR && in cs_etm__set_sample_flags()
2154 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK) in cs_etm__set_sample_flags()
2155 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2162 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
2163 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK) in cs_etm__set_sample_flags()
2164 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2172 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
2173 packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET) in cs_etm__set_sample_flags()
2174 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2182 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
2183 packet->last_instr_subtype == OCSD_S_INSTR_NONE) in cs_etm__set_sample_flags()
2184 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2188 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT && in cs_etm__set_sample_flags()
2189 packet->last_instr_subtype == OCSD_S_INSTR_V8_RET) in cs_etm__set_sample_flags()
2190 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2198 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY) in cs_etm__set_sample_flags()
2199 prev_packet->flags |= PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2208 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2212 packet, packet->start_addr)) in cs_etm__set_sample_flags()
2213 prev_packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2223 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
2224 prev_packet->flags |= PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2228 ret = cs_etm__get_magic(packet->trace_chan_id, &magic); in cs_etm__set_sample_flags()
2234 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2242 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2251 packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2262 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
2263 prev_packet->flags = packet->flags; in cs_etm__set_sample_flags()
2291 if (prev_packet->sample_type == CS_ETM_RANGE) in cs_etm__set_sample_flags()
2292 prev_packet->flags = PERF_IP_FLAG_BRANCH | in cs_etm__set_sample_flags()
2316 ret = cs_etm_decoder__process_data_block(etmq->decoder, in cs_etm__decode_data_block()
2317 etmq->offset, in cs_etm__decode_data_block()
2318 &etmq->buf[etmq->buf_used], in cs_etm__decode_data_block()
2319 etmq->buf_len, in cs_etm__decode_data_block()
2324 etmq->offset += processed; in cs_etm__decode_data_block()
2325 etmq->buf_used += processed; in cs_etm__decode_data_block()
2326 etmq->buf_len -= processed; in cs_etm__decode_data_block()
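/*
 * OpenCSD may consume only part of the buffer on each call, so
 * cs_etm__decode_data_block() advances three counters in lock-step and
 * the caller loops until buf_len reaches zero. A self-contained sketch
 * of the bookkeeping:
 */
#include <stddef.h>

struct buf_pos {
	size_t offset;	/* absolute position within the whole AUX trace */
	size_t used;	/* bytes of the current buffer already decoded  */
	size_t len;	/* bytes of the current buffer still pending    */
};

static void advance(struct buf_pos *p, size_t processed)
{
	p->offset += processed;
	p->used += processed;
	p->len -= processed;
}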
2338 packet_queue = &tidq->packet_queue; in cs_etm__process_traceid_queue()
2343 tidq->packet); in cs_etm__process_traceid_queue()
2362 switch (tidq->packet->sample_type) { in cs_etm__process_traceid_queue()
2393 return -EINVAL; in cs_etm__process_traceid_queue()
2407 struct intlist *traceid_queues_list = etmq->traceid_queues_list; in cs_etm__clear_all_traceid_queues()
2410 idx = (int)(intptr_t)inode->priv; in cs_etm__clear_all_traceid_queues()
2411 tidq = etmq->traceid_queues[idx]; in cs_etm__clear_all_traceid_queues()
2425 return -EINVAL; in cs_etm__run_per_thread_timeless_decoder()
2446 } while (etmq->buf_len); in cs_etm__run_per_thread_timeless_decoder()
2477 * buffer. But here in per-cpu mode we need to iterate in cs_etm__run_per_cpu_timeless_decoder()
2481 etmq->traceid_queues_list) { in cs_etm__run_per_cpu_timeless_decoder()
2482 idx = (int)(intptr_t)inode->priv; in cs_etm__run_per_cpu_timeless_decoder()
2483 tidq = etmq->traceid_queues[idx]; in cs_etm__run_per_cpu_timeless_decoder()
2486 } while (etmq->buf_len); in cs_etm__run_per_cpu_timeless_decoder()
2488 intlist__for_each_entry(inode, etmq->traceid_queues_list) { in cs_etm__run_per_cpu_timeless_decoder()
2489 idx = (int)(intptr_t)inode->priv; in cs_etm__run_per_cpu_timeless_decoder()
2490 tidq = etmq->traceid_queues[idx]; in cs_etm__run_per_cpu_timeless_decoder()
2505 struct auxtrace_queues *queues = &etm->queues; in cs_etm__process_timeless_queues()
2507 for (i = 0; i < queues->nr_queues; i++) { in cs_etm__process_timeless_queues()
2508 struct auxtrace_queue *queue = &etm->queues.queue_array[i]; in cs_etm__process_timeless_queues()
2509 struct cs_etm_queue *etmq = queue->priv; in cs_etm__process_timeless_queues()
2515 if (etm->per_thread_decoding) { in cs_etm__process_timeless_queues()
2522 if (tid == -1 || thread__tid(tidq->thread) == tid) in cs_etm__process_timeless_queues()
2542 * Pre-populate the heap with one entry from each queue so that we can in cs_etm__process_timestamped_queues()
2545 for (i = 0; i < etm->queues.nr_queues; i++) { in cs_etm__process_timestamped_queues()
2546 etmq = etm->queues.queue_array[i].priv; in cs_etm__process_timestamped_queues()
2556 if (!etm->heap.heap_cnt) in cs_etm__process_timestamped_queues()
2560 cs_queue_nr = etm->heap.heap_array[0].queue_nr; in cs_etm__process_timestamped_queues()
2563 queue = &etm->queues.queue_array[queue_nr]; in cs_etm__process_timestamped_queues()
2564 etmq = queue->priv; in cs_etm__process_timestamped_queues()
2570 auxtrace_heap__pop(&etm->heap); in cs_etm__process_timestamped_queues()
2579 ret = -EINVAL; in cs_etm__process_timestamped_queues()
2637 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp); in cs_etm__process_timestamped_queues()
2640 for (i = 0; i < etm->queues.nr_queues; i++) { in cs_etm__process_timestamped_queues()
2643 etmq = etm->queues.queue_array[i].priv; in cs_etm__process_timestamped_queues()
2647 intlist__for_each_entry(inode, etmq->traceid_queues_list) { in cs_etm__process_timestamped_queues()
2648 int idx = (int)(intptr_t)inode->priv; in cs_etm__process_timestamped_queues()
2651 tidq = etmq->traceid_queues[idx]; in cs_etm__process_timestamped_queues()
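/*
 * In per-CPU mode the queues are merged in time order: the heap always
 * yields the queue whose next timestamp is oldest, that queue is
 * decoded until it crosses into newer time, then it is re-inserted
 * with its new timestamp. A self-contained model, with a linear scan
 * standing in for auxtrace_heap:
 */
#include <stdio.h>

typedef unsigned long long u64;
#define NQ	3
#define DRAINED	((u64)-1)

int main(void)
{
	u64 next_ts[NQ] = { 30, 10, 20 };	/* next timestamp per queue */

	for (;;) {
		int q = -1, i;

		for (i = 0; i < NQ; i++)	/* "pop" the oldest queue */
			if (next_ts[i] != DRAINED &&
			    (q < 0 || next_ts[i] < next_ts[q]))
				q = i;
		if (q < 0)
			break;			/* every queue fully decoded */

		printf("decode queue %d from ts %llu\n", q, next_ts[q]);

		/* pretend decoding advanced this queue by 25 time units */
		next_ts[q] = next_ts[q] + 25 > 60 ? DRAINED : next_ts[q] + 25;
	}
	return 0;
}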
2666 if (etm->timeless_decoding) in cs_etm__process_itrace_start()
2675 th = machine__findnew_thread(&etm->session->machines.host, in cs_etm__process_itrace_start()
2676 event->itrace_start.pid, in cs_etm__process_itrace_start()
2677 event->itrace_start.tid); in cs_etm__process_itrace_start()
2679 return -ENOMEM; in cs_etm__process_itrace_start()
2690 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; in cs_etm__process_switch_cpu_wide()
2693 * Context switches in per-thread mode are irrelevant since perf in cs_etm__process_switch_cpu_wide()
2696 if (etm->timeless_decoding) in cs_etm__process_switch_cpu_wide()
2713 th = machine__findnew_thread(&etm->session->machines.host, in cs_etm__process_switch_cpu_wide()
2714 event->context_switch.next_prev_pid, in cs_etm__process_switch_cpu_wide()
2715 event->context_switch.next_prev_tid); in cs_etm__process_switch_cpu_wide()
2717 return -ENOMEM; in cs_etm__process_switch_cpu_wide()
2729 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_event()
2736 if (!tool->ordered_events) { in cs_etm__process_event()
2737 pr_err("CoreSight ETM Trace requires ordered events\n"); in cs_etm__process_event()
2738 return -EINVAL; in cs_etm__process_event()
2741 switch (event->header.type) { in cs_etm__process_event()
2744 * Don't need to wait for cs_etm__flush_events() in per-thread mode to in cs_etm__process_event()
2751 if (etm->per_thread_decoding && etm->timeless_decoding) in cs_etm__process_event()
2753 event->fork.tid); in cs_etm__process_event()
2768 if (sample->time && (sample->time != (u64)-1)) in cs_etm__process_event()
2769 etm->latest_kernel_timestamp = sample->time; in cs_etm__process_event()
2789 for (i = 0; i < etm->queues.nr_queues; ++i) in dump_queued_data()
2790 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list) in dump_queued_data()
2791 if (buf->reference == event->reference) in dump_queued_data()
2792 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf); in dump_queued_data()
2799 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_auxtrace_event()
2802 if (!etm->data_queued) { in cs_etm__process_auxtrace_event()
2805 int fd = perf_data__fd(session->data); in cs_etm__process_auxtrace_event()
2806 bool is_pipe = perf_data__is_pipe(session->data); in cs_etm__process_auxtrace_event()
2808 int idx = event->auxtrace.idx; in cs_etm__process_auxtrace_event()
2814 if (data_offset == -1) in cs_etm__process_auxtrace_event()
2815 return -errno; in cs_etm__process_auxtrace_event()
2818 err = auxtrace_queues__add_event(&etm->queues, session, in cs_etm__process_auxtrace_event()
2825 * the aux record so only works in non-piped mode where data is in cs_etm__process_auxtrace_event()
2829 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx], in cs_etm__process_auxtrace_event()
2836 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer); in cs_etm__process_auxtrace_event()
2840 dump_queued_data(etm, &event->auxtrace); in cs_etm__process_auxtrace_event()
2848 struct evlist *evlist = etm->session->evlist; in cs_etm__setup_timeless_decoding()
2850 /* Override timeless mode with user input from --itrace=Z */ in cs_etm__setup_timeless_decoding()
2851 if (etm->synth_opts.timeless_decoding) { in cs_etm__setup_timeless_decoding()
2852 etm->timeless_decoding = true; in cs_etm__setup_timeless_decoding()
2860 if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) { in cs_etm__setup_timeless_decoding()
2861 etm->timeless_decoding = in cs_etm__setup_timeless_decoding()
2862 !(evsel->core.attr.config & BIT(ETM_OPT_TS)); in cs_etm__setup_timeless_decoding()
2867 return -EINVAL; in cs_etm__setup_timeless_decoding()
2902 /* remaining block params at offset +1 from source */ in cs_etm__create_meta_blk()
2903 for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++) in cs_etm__create_meta_blk()
2908 /* read version 1 info block - input and output nr_params may differ */ in cs_etm__create_meta_blk()
2913 /* if input has more params than output - skip excess */ in cs_etm__create_meta_blk()
2922 metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params; in cs_etm__create_meta_blk()
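/*
 * The v1 metadata header carries its own parameter count, so readers
 * can consume blocks written with more or fewer parameters than they
 * know about. A sketch of the copy rule (hypothetical names, assuming
 * the destination block was zero-allocated as in
 * cs_etm__create_meta_blk()):
 */
#include <string.h>

typedef unsigned long long u64;

static void copy_params(u64 *out, unsigned int nr_out,
			const u64 *in, unsigned int nr_in)
{
	unsigned int n = nr_in < nr_out ? nr_in : nr_out;

	memcpy(out, in, n * sizeof(u64));	/* take what both sides know */
	/* trailing input params are skipped; missing outputs stay zero */
}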
2953 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__queue_aux_fragment()
2965 auxtrace_event = &auxtrace_event_union->auxtrace; in cs_etm__queue_aux_fragment()
2966 if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE) in cs_etm__queue_aux_fragment()
2967 return -EINVAL; in cs_etm__queue_aux_fragment()
2969 if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) || in cs_etm__queue_aux_fragment()
2970 auxtrace_event->header.size != sz) { in cs_etm__queue_aux_fragment()
2971 return -EINVAL; in cs_etm__queue_aux_fragment()
2975 * In per-thread mode, auxtrace CPU is set to -1, but TID will be set instead. See in cs_etm__queue_aux_fragment()
2978 * So now compare only TIDs if auxtrace CPU is -1, and CPUs if auxtrace CPU is not -1. in cs_etm__queue_aux_fragment()
2981 if (auxtrace_event->cpu == (__u32) -1) { in cs_etm__queue_aux_fragment()
2982 etm->per_thread_decoding = true; in cs_etm__queue_aux_fragment()
2983 if (auxtrace_event->tid != sample->tid) in cs_etm__queue_aux_fragment()
2985 } else if (auxtrace_event->cpu != sample->cpu) { in cs_etm__queue_aux_fragment()
2986 if (etm->per_thread_decoding) { in cs_etm__queue_aux_fragment()
2988 * Found a per-cpu buffer after a per-thread one was in cs_etm__queue_aux_fragment()
2991 pr_err("CS ETM: Inconsistent per-thread/per-cpu mode.\n"); in cs_etm__queue_aux_fragment()
2992 return -EINVAL; in cs_etm__queue_aux_fragment()
2997 if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) { in cs_etm__queue_aux_fragment()
3003 aux_size = min(aux_event->aux_size, auxtrace_event->size); in cs_etm__queue_aux_fragment()
3009 aux_offset = aux_event->aux_offset - aux_size; in cs_etm__queue_aux_fragment()
3011 aux_size = aux_event->aux_size; in cs_etm__queue_aux_fragment()
3012 aux_offset = aux_event->aux_offset; in cs_etm__queue_aux_fragment()
3015 if (aux_offset >= auxtrace_event->offset && in cs_etm__queue_aux_fragment()
3016 aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) { in cs_etm__queue_aux_fragment()
3024 file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size; in cs_etm__queue_aux_fragment()
3027 " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu); in cs_etm__queue_aux_fragment()
3028 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment, in cs_etm__queue_aux_fragment()
3033 idx = auxtrace_event->idx; in cs_etm__queue_aux_fragment()
3034 formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW); in cs_etm__queue_aux_fragment()
3035 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx], in cs_etm__queue_aux_fragment()
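/*
 * In snapshot/overwrite mode the recorded aux_offset points at the
 * *end* of the captured data, so cs_etm__queue_aux_fragment() clamps
 * the fragment to the auxtrace buffer size and derives its start by
 * stepping back. A sketch of the two cases (min() spelled out):
 */
typedef unsigned long long u64;

static void fragment_bounds(int overwrite, u64 aux_offset, u64 aux_size,
			    u64 buffer_size, u64 *off, u64 *len)
{
	if (overwrite) {
		*len = aux_size < buffer_size ? aux_size : buffer_size;
		*off = aux_offset - *len;	/* data ends at aux_offset */
	} else {
		*len = aux_size;
		*off = aux_offset;		/* data starts at aux_offset */
	}
}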
3047 if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) { in cs_etm__process_aux_hw_id_cb()
3065 if (event->header.type != PERF_RECORD_AUX) in cs_etm__queue_aux_records_cb()
3068 if (event->header.size < sizeof(struct perf_record_aux)) in cs_etm__queue_aux_records_cb()
3069 return -EINVAL; in cs_etm__queue_aux_records_cb()
3072 if (!event->aux.aux_size) in cs_etm__queue_aux_records_cb()
3079 evsel = evlist__event2evsel(session->evlist, event); in cs_etm__queue_aux_records_cb()
3081 return -EINVAL; in cs_etm__queue_aux_records_cb()
3089 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { in cs_etm__queue_aux_records_cb()
3090 for (i = 0; i < auxtrace_index->nr; i++) { in cs_etm__queue_aux_records_cb()
3091 ent = &auxtrace_index->entries[i]; in cs_etm__queue_aux_records_cb()
3092 ret = cs_etm__queue_aux_fragment(session, ent->file_offset, in cs_etm__queue_aux_records_cb()
3093 ent->sz, &event->aux, &sample); in cs_etm__queue_aux_records_cb()
3108 " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu); in cs_etm__queue_aux_records_cb()
3114 struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index, in cs_etm__queue_aux_records()
3116 if (index && index->nr > 0) in cs_etm__queue_aux_records()
3117 return perf_session__peek_events(session, session->header.data_offset, in cs_etm__queue_aux_records()
3118 session->header.data_size, in cs_etm__queue_aux_records()
3124 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still in cs_etm__queue_aux_records()
3133 (CS_##type##_##param - CS_ETM_COMMON_BLK_MAX_V1))
3182 return -EINVAL; in cs_etm__map_trace_ids_metadata()
3214 return -EINVAL; in cs_etm__clear_unused_trace_ids_metadata()
3223 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; in cs_etm__process_auxtrace_info_full()
3225 struct perf_record_time_conv *tc = &session->time_conv; in cs_etm__process_auxtrace_info_full()
3227 int total_size = auxtrace_info->header.size; in cs_etm__process_auxtrace_info_full()
3237 * Create an RB tree for traceID-metadata tuples. Since the conversion in cs_etm__process_auxtrace_info_full()
3243 return -ENOMEM; in cs_etm__process_auxtrace_info_full()
3246 ptr = (u64 *) auxtrace_info->priv; in cs_etm__process_auxtrace_info_full()
3250 err = -ENOMEM; in cs_etm__process_auxtrace_info_full()
3275 metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1); in cs_etm__process_auxtrace_info_full()
3279 err = -EINVAL; in cs_etm__process_auxtrace_info_full()
3284 err = -ENOMEM; in cs_etm__process_auxtrace_info_full()
3296 priv_size = total_size - event_header_size - INFO_HEADER_SIZE; in cs_etm__process_auxtrace_info_full()
3298 err = -EINVAL; in cs_etm__process_auxtrace_info_full()
3305 err = -ENOMEM; in cs_etm__process_auxtrace_info_full()
3314 etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]); in cs_etm__process_auxtrace_info_full()
3316 err = auxtrace_queues__init(&etm->queues); in cs_etm__process_auxtrace_info_full()
3320 if (session->itrace_synth_opts->set) { in cs_etm__process_auxtrace_info_full()
3321 etm->synth_opts = *session->itrace_synth_opts; in cs_etm__process_auxtrace_info_full()
3323 itrace_synth_opts__set_default(&etm->synth_opts, in cs_etm__process_auxtrace_info_full()
3324 session->itrace_synth_opts->default_no_sample); in cs_etm__process_auxtrace_info_full()
3325 etm->synth_opts.callchain = false; in cs_etm__process_auxtrace_info_full()
3328 etm->session = session; in cs_etm__process_auxtrace_info_full()
3330 etm->num_cpu = num_cpu; in cs_etm__process_auxtrace_info_full()
3331 etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff); in cs_etm__process_auxtrace_info_full()
3332 etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0); in cs_etm__process_auxtrace_info_full()
3333 etm->metadata = metadata; in cs_etm__process_auxtrace_info_full()
3334 etm->auxtrace_type = auxtrace_info->type; in cs_etm__process_auxtrace_info_full()
3337 etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu); in cs_etm__process_auxtrace_info_full()
3339 if (!etm->has_virtual_ts) in cs_etm__process_auxtrace_info_full()
3343 etm->auxtrace.process_event = cs_etm__process_event; in cs_etm__process_auxtrace_info_full()
3344 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event; in cs_etm__process_auxtrace_info_full()
3345 etm->auxtrace.flush_events = cs_etm__flush_events; in cs_etm__process_auxtrace_info_full()
3346 etm->auxtrace.free_events = cs_etm__free_events; in cs_etm__process_auxtrace_info_full()
3347 etm->auxtrace.free = cs_etm__free; in cs_etm__process_auxtrace_info_full()
3348 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace; in cs_etm__process_auxtrace_info_full()
3349 session->auxtrace = &etm->auxtrace; in cs_etm__process_auxtrace_info_full()
3355 etm->tc.time_shift = tc->time_shift; in cs_etm__process_auxtrace_info_full()
3356 etm->tc.time_mult = tc->time_mult; in cs_etm__process_auxtrace_info_full()
3357 etm->tc.time_zero = tc->time_zero; in cs_etm__process_auxtrace_info_full()
3359 etm->tc.time_cycles = tc->time_cycles; in cs_etm__process_auxtrace_info_full()
3360 etm->tc.time_mask = tc->time_mask; in cs_etm__process_auxtrace_info_full()
3361 etm->tc.cap_user_time_zero = tc->cap_user_time_zero; in cs_etm__process_auxtrace_info_full()
3362 etm->tc.cap_user_time_short = tc->cap_user_time_short; in cs_etm__process_auxtrace_info_full()
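/*
 * With has_virtual_ts, raw counter values are converted to perf time
 * using the kernel-provided (shift, mult, zero) triple copied above.
 * The conversion is essentially the following (a simplified sketch of
 * perf's tsc_to_perf_time(), ignoring the time_cycles/time_mask
 * handling for short counters):
 */
#include <stdint.h>

struct tc { uint16_t time_shift; uint32_t time_mult; uint64_t time_zero; };

static uint64_t cyc_to_perf_time(uint64_t cyc, const struct tc *tc)
{
	uint64_t quot = cyc >> tc->time_shift;			  /* whole steps */
	uint64_t rem = cyc & (((uint64_t)1 << tc->time_shift) - 1); /* remainder */

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}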
3383 * value CORESIGHT_TRACE_ID_UNUSED_VAL - which indicates no decoder is required. in cs_etm__process_auxtrace_info_full()
3385 * If no AUX_HW_ID packets are present - which means a file recorded on an old kernel in cs_etm__process_auxtrace_info_full()
3386 * then we map Trace ID values to CPU directly from the metadata - clearing any unused in cs_etm__process_auxtrace_info_full()
3392 err = perf_session__peek_events(session, session->header.data_offset, in cs_etm__process_auxtrace_info_full()
3393 session->header.data_size, in cs_etm__process_auxtrace_info_full()
3412 etm->data_queued = etm->queues.populated; in cs_etm__process_auxtrace_info_full()
3416 auxtrace_queues__free(&etm->queues); in cs_etm__process_auxtrace_info_full()
3417 session->auxtrace = NULL; in cs_etm__process_auxtrace_info_full()