Lines Matching refs:etm
101 struct cs_etm_auxtrace *etm; member
117 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm);
118 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
215 return etmq->etm->pid_fmt; in cs_etm__get_pid_fmt()
289 static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu) in get_cpu_data() argument
294 for (i = 0; i < etm->num_cpu; i++) { in get_cpu_data()
295 if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) { in get_cpu_data()
296 metadata = etm->metadata[i]; in get_cpu_data()
314 struct cs_etm_auxtrace *etm; in cs_etm__process_aux_output_hw_id() local
333 etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace); in cs_etm__process_aux_output_hw_id()
334 if (!etm || !etm->metadata) in cs_etm__process_aux_output_hw_id()
373 cpu_data = get_cpu_data(etm, cpu); in cs_etm__process_aux_output_hw_id()
470 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__init_traceid_queue() local
474 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__init_traceid_queue()
477 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1, in cs_etm__init_traceid_queue()
479 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host); in cs_etm__init_traceid_queue()
489 if (etm->synth_opts.last_branch) { in cs_etm__init_traceid_queue()
492 sz += etm->synth_opts.last_branch_sz * in cs_etm__init_traceid_queue()
524 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__etmq_get_traceid_queue() local
526 if (etm->per_thread_decoding) in cs_etm__etmq_get_traceid_queue()
602 static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm, in cs_etm__packet_swap() argument
607 if (etm->synth_opts.branches || etm->synth_opts.last_branch || in cs_etm__packet_swap()
608 etm->synth_opts.instructions) { in cs_etm__packet_swap()
644 struct cs_etm_auxtrace *etm, int idx, in cs_etm__set_trace_param_etmv3() argument
647 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv3()
655 struct cs_etm_auxtrace *etm, int idx) in cs_etm__set_trace_param_etmv4() argument
657 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_etmv4()
669 struct cs_etm_auxtrace *etm, int idx) in cs_etm__set_trace_param_ete() argument
671 u64 **metadata = etm->metadata; in cs_etm__set_trace_param_ete()
684 struct cs_etm_auxtrace *etm, in cs_etm__init_trace_params() argument
692 architecture = etm->metadata[i][CS_ETM_MAGIC]; in cs_etm__init_trace_params()
696 etmidr = etm->metadata[i][CS_ETM_ETMIDR]; in cs_etm__init_trace_params()
697 cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr); in cs_etm__init_trace_params()
700 cs_etm__set_trace_param_etmv4(t_params, etm, i); in cs_etm__init_trace_params()
703 cs_etm__set_trace_param_ete(t_params, etm, i); in cs_etm__init_trace_params()
767 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__flush_events() local
776 if (etm->timeless_decoding) { in cs_etm__flush_events()
781 return cs_etm__process_timeless_queues(etm, -1); in cs_etm__flush_events()
784 return cs_etm__process_timestamped_queues(etm); in cs_etm__flush_events()
896 return &etmq->etm->session->machines.host; in cs_etm__get_machine()
908 return machines__find_guest(&etmq->etm->session->machines, in cs_etm__get_machine()
915 return &etmq->etm->session->machines.host; in cs_etm__get_machine()
1018 static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm, in cs_etm__alloc_queue() argument
1028 int decoders = formatted ? etm->num_cpu : 1; in cs_etm__alloc_queue()
1044 if (cs_etm__init_trace_params(t_params, etm, decoders)) in cs_etm__alloc_queue()
1081 static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm, in cs_etm__setup_queue() argument
1091 etmq = cs_etm__alloc_queue(etm, formatted); in cs_etm__setup_queue()
1097 etmq->etm = etm; in cs_etm__setup_queue()
1104 static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm, in cs_etm__queue_first_cs_timestamp() argument
1170 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp); in cs_etm__queue_first_cs_timestamp()
1200 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos; in cs_etm__copy_last_branch_rb()
1212 if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) { in cs_etm__copy_last_branch_rb()
1293 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz; in cs_etm__update_last_branch_rb()
1308 if (bs->nr < etmq->etm->synth_opts.last_branch_sz) in cs_etm__update_last_branch_rb()
1327 queue = &etmq->etm->queues.queue_array[etmq->queue_nr]; in cs_etm__get_trace()
1344 int fd = perf_data__fd(etmq->etm->session->data); in cs_etm__get_trace()
1395 return !!etmq->etm->timeless_decoding; in cs_etm__etmq_is_timeless()
1429 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__convert_sample_time() local
1431 if (etm->has_virtual_ts) in cs_etm__convert_sample_time()
1432 return tsc_to_perf_time(cs_timestamp, &etm->tc); in cs_etm__convert_sample_time()
1440 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__resolve_sample_time() local
1443 if (!etm->timeless_decoding && etm->has_virtual_ts) in cs_etm__resolve_sample_time()
1446 return etm->latest_kernel_timestamp; in cs_etm__resolve_sample_time()
1454 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_instruction_sample() local
1468 sample.id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1469 sample.stream_id = etmq->etm->instructions_id; in cs_etm__synth_instruction_sample()
1477 if (etm->synth_opts.last_branch) in cs_etm__synth_instruction_sample()
1480 if (etm->synth_opts.inject) { in cs_etm__synth_instruction_sample()
1482 etm->instructions_sample_type); in cs_etm__synth_instruction_sample()
1487 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_instruction_sample()
1505 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__synth_branch_sample() local
1529 sample.id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1530 sample.stream_id = etmq->etm->branches_id; in cs_etm__synth_branch_sample()
1542 if (etm->synth_opts.last_branch) { in cs_etm__synth_branch_sample()
1554 if (etm->synth_opts.inject) { in cs_etm__synth_branch_sample()
1556 etm->branches_sample_type); in cs_etm__synth_branch_sample()
1561 ret = perf_session__deliver_synth_event(etm->session, event, &sample); in cs_etm__synth_branch_sample()
1600 static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, in cs_etm__synth_events() argument
1611 if (evsel->core.attr.type == etm->pmu_type) { in cs_etm__synth_events()
1628 if (etm->timeless_decoding) in cs_etm__synth_events()
1647 if (etm->synth_opts.branches) { in cs_etm__synth_events()
1654 etm->branches_sample_type = attr.sample_type; in cs_etm__synth_events()
1655 etm->branches_id = id; in cs_etm__synth_events()
1660 if (etm->synth_opts.last_branch) { in cs_etm__synth_events()
1670 if (etm->synth_opts.instructions) { in cs_etm__synth_events()
1672 attr.sample_period = etm->synth_opts.period; in cs_etm__synth_events()
1673 etm->instructions_sample_period = attr.sample_period; in cs_etm__synth_events()
1677 etm->instructions_sample_type = attr.sample_type; in cs_etm__synth_events()
1678 etm->instructions_id = id; in cs_etm__synth_events()
1688 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__sample() local
1702 if (etm->synth_opts.last_branch && in cs_etm__sample()
1707 if (etm->synth_opts.instructions && in cs_etm__sample()
1708 tidq->period_instructions >= etm->instructions_sample_period) { in cs_etm__sample()
1761 u64 offset = etm->instructions_sample_period - instrs_prev; in cs_etm__sample()
1765 if (etm->synth_opts.last_branch) in cs_etm__sample()
1769 etm->instructions_sample_period) { in cs_etm__sample()
1780 etm->instructions_sample_period); in cs_etm__sample()
1784 offset += etm->instructions_sample_period; in cs_etm__sample()
1786 etm->instructions_sample_period; in cs_etm__sample()
1790 if (etm->synth_opts.branches) { in cs_etm__sample()
1809 cs_etm__packet_swap(etm, tidq); in cs_etm__sample()
1837 struct cs_etm_auxtrace *etm = etmq->etm; in cs_etm__flush() local
1843 if (etmq->etm->synth_opts.last_branch && in cs_etm__flush()
1844 etmq->etm->synth_opts.instructions && in cs_etm__flush()
1870 if (etm->synth_opts.branches && in cs_etm__flush()
1878 cs_etm__packet_swap(etm, tidq); in cs_etm__flush()
1881 if (etm->synth_opts.last_branch) in cs_etm__flush()
1901 if (etmq->etm->synth_opts.last_branch && in cs_etm__end_block()
1902 etmq->etm->synth_opts.instructions && in cs_etm__end_block()
2507 static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, in cs_etm__process_timeless_queues() argument
2511 struct auxtrace_queues *queues = &etm->queues; in cs_etm__process_timeless_queues()
2514 struct auxtrace_queue *queue = &etm->queues.queue_array[i]; in cs_etm__process_timeless_queues()
2521 if (etm->per_thread_decoding) { in cs_etm__process_timeless_queues()
2537 static int cs_etm__process_timestamped_queues(struct cs_etm_auxtrace *etm) in cs_etm__process_timestamped_queues() argument
2551 for (i = 0; i < etm->queues.nr_queues; i++) { in cs_etm__process_timestamped_queues()
2552 etmq = etm->queues.queue_array[i].priv; in cs_etm__process_timestamped_queues()
2556 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i); in cs_etm__process_timestamped_queues()
2562 if (!etm->heap.heap_cnt) in cs_etm__process_timestamped_queues()
2566 cs_queue_nr = etm->heap.heap_array[0].queue_nr; in cs_etm__process_timestamped_queues()
2569 queue = &etm->queues.queue_array[queue_nr]; in cs_etm__process_timestamped_queues()
2576 auxtrace_heap__pop(&etm->heap); in cs_etm__process_timestamped_queues()
2643 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp); in cs_etm__process_timestamped_queues()
2650 static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm, in cs_etm__process_itrace_start() argument
2655 if (etm->timeless_decoding) in cs_etm__process_itrace_start()
2664 th = machine__findnew_thread(&etm->session->machines.host, in cs_etm__process_itrace_start()
2675 static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm, in cs_etm__process_switch_cpu_wide() argument
2685 if (etm->timeless_decoding) in cs_etm__process_switch_cpu_wide()
2702 th = machine__findnew_thread(&etm->session->machines.host, in cs_etm__process_switch_cpu_wide()
2718 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_event() local
2740 if (etm->per_thread_decoding && etm->timeless_decoding) in cs_etm__process_event()
2741 return cs_etm__process_timeless_queues(etm, in cs_etm__process_event()
2746 return cs_etm__process_itrace_start(etm, event); in cs_etm__process_event()
2749 return cs_etm__process_switch_cpu_wide(etm, event); in cs_etm__process_event()
2758 etm->latest_kernel_timestamp = sample->time; in cs_etm__process_event()
2768 static void dump_queued_data(struct cs_etm_auxtrace *etm, in dump_queued_data() argument
2778 for (i = 0; i < etm->queues.nr_queues; ++i) in dump_queued_data()
2779 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list) in dump_queued_data()
2781 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf); in dump_queued_data()
2788 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__process_auxtrace_event() local
2791 if (!etm->data_queued) { in cs_etm__process_auxtrace_event()
2807 err = auxtrace_queues__add_event(&etm->queues, session, in cs_etm__process_auxtrace_event()
2818 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx], in cs_etm__process_auxtrace_event()
2825 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer); in cs_etm__process_auxtrace_event()
2829 dump_queued_data(etm, &event->auxtrace); in cs_etm__process_auxtrace_event()
2834 static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm) in cs_etm__setup_timeless_decoding() argument
2837 struct evlist *evlist = etm->session->evlist; in cs_etm__setup_timeless_decoding()
2840 if (etm->synth_opts.timeless_decoding) { in cs_etm__setup_timeless_decoding()
2841 etm->timeless_decoding = true; in cs_etm__setup_timeless_decoding()
2849 if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) { in cs_etm__setup_timeless_decoding()
2850 etm->timeless_decoding = in cs_etm__setup_timeless_decoding()
2942 struct cs_etm_auxtrace *etm = container_of(session->auxtrace, in cs_etm__queue_aux_fragment() local
2971 etm->per_thread_decoding = true; in cs_etm__queue_aux_fragment()
2975 if (etm->per_thread_decoding) { in cs_etm__queue_aux_fragment()
3017 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment, in cs_etm__queue_aux_fragment()
3024 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx], in cs_etm__queue_aux_fragment()
3213 struct cs_etm_auxtrace *etm = NULL; in cs_etm__process_auxtrace_info_full() local
3291 etm = zalloc(sizeof(*etm)); in cs_etm__process_auxtrace_info_full()
3293 if (!etm) { in cs_etm__process_auxtrace_info_full()
3303 etm->pid_fmt = cs_etm__init_pid_fmt(metadata[0]); in cs_etm__process_auxtrace_info_full()
3305 err = auxtrace_queues__init(&etm->queues); in cs_etm__process_auxtrace_info_full()
3310 etm->synth_opts = *session->itrace_synth_opts; in cs_etm__process_auxtrace_info_full()
3312 itrace_synth_opts__set_default(&etm->synth_opts, in cs_etm__process_auxtrace_info_full()
3314 etm->synth_opts.callchain = false; in cs_etm__process_auxtrace_info_full()
3317 etm->session = session; in cs_etm__process_auxtrace_info_full()
3319 etm->num_cpu = num_cpu; in cs_etm__process_auxtrace_info_full()
3320 etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff); in cs_etm__process_auxtrace_info_full()
3321 etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0); in cs_etm__process_auxtrace_info_full()
3322 etm->metadata = metadata; in cs_etm__process_auxtrace_info_full()
3323 etm->auxtrace_type = auxtrace_info->type; in cs_etm__process_auxtrace_info_full()
3326 etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu); in cs_etm__process_auxtrace_info_full()
3328 if (!etm->has_virtual_ts) in cs_etm__process_auxtrace_info_full()
3332 etm->auxtrace.process_event = cs_etm__process_event; in cs_etm__process_auxtrace_info_full()
3333 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event; in cs_etm__process_auxtrace_info_full()
3334 etm->auxtrace.flush_events = cs_etm__flush_events; in cs_etm__process_auxtrace_info_full()
3335 etm->auxtrace.free_events = cs_etm__free_events; in cs_etm__process_auxtrace_info_full()
3336 etm->auxtrace.free = cs_etm__free; in cs_etm__process_auxtrace_info_full()
3337 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace; in cs_etm__process_auxtrace_info_full()
3338 session->auxtrace = &etm->auxtrace; in cs_etm__process_auxtrace_info_full()
3340 err = cs_etm__setup_timeless_decoding(etm); in cs_etm__process_auxtrace_info_full()
3344 etm->tc.time_shift = tc->time_shift; in cs_etm__process_auxtrace_info_full()
3345 etm->tc.time_mult = tc->time_mult; in cs_etm__process_auxtrace_info_full()
3346 etm->tc.time_zero = tc->time_zero; in cs_etm__process_auxtrace_info_full()
3348 etm->tc.time_cycles = tc->time_cycles; in cs_etm__process_auxtrace_info_full()
3349 etm->tc.time_mask = tc->time_mask; in cs_etm__process_auxtrace_info_full()
3350 etm->tc.cap_user_time_zero = tc->cap_user_time_zero; in cs_etm__process_auxtrace_info_full()
3351 etm->tc.cap_user_time_short = tc->cap_user_time_short; in cs_etm__process_auxtrace_info_full()
3353 err = cs_etm__synth_events(etm, session); in cs_etm__process_auxtrace_info_full()
3401 etm->data_queued = etm->queues.populated; in cs_etm__process_auxtrace_info_full()
3405 auxtrace_queues__free(&etm->queues); in cs_etm__process_auxtrace_info_full()
3408 zfree(&etm); in cs_etm__process_auxtrace_info_full()
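
Several of the matches above (cs_etm__flush_events(), cs_etm__process_event(), cs_etm__process_auxtrace_event(), cs_etm__queue_aux_fragment()) recover the private cs_etm_auxtrace context from session->auxtrace with container_of(). The following is a minimal, self-contained sketch of that pattern only, not code from cs-etm.c: struct demo_auxtrace, struct demo_ctx and demo_process_event() are hypothetical stand-ins for struct auxtrace, struct cs_etm_auxtrace and the real callbacks, and the simplified container_of() macro here omits the kernel's type-checking.

/*
 * Sketch of the container_of() recovery pattern seen in the listing:
 * only a pointer to the embedded auxtrace member is stored in the
 * session, and the callback subtracts the member's offset to get back
 * to the enclosing private context. All names below are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_auxtrace {			/* stand-in for struct auxtrace */
	int (*process_event)(struct demo_auxtrace *at);
};

struct demo_ctx {			/* stand-in for struct cs_etm_auxtrace */
	int pmu_type;
	struct demo_auxtrace auxtrace;	/* embedded member, as in cs_etm_auxtrace */
};

static int demo_process_event(struct demo_auxtrace *at)
{
	/* Same recovery step as in cs_etm__process_event() and friends. */
	struct demo_ctx *ctx = container_of(at, struct demo_ctx, auxtrace);

	printf("pmu_type = %d\n", ctx->pmu_type);
	return 0;
}

int main(void)
{
	struct demo_ctx ctx = { .pmu_type = 42 };

	ctx.auxtrace.process_event = demo_process_event;
	/* Only the embedded member is handed out, as with session->auxtrace. */
	return ctx.auxtrace.process_event(&ctx.auxtrace);
}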