/*
 * intel-bts.c: Intel Branch Trace Store support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <endian.h>
#include <errno.h>
#include <byteswap.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>

#include "cpumap.h"
#include "color.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "session.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "tsc.h"
#include "auxtrace.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-bts.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_BTS_ERR_NOINSN  5
#define INTEL_BTS_ERR_LOST    9

#if __BYTE_ORDER == __BIG_ENDIAN
#define le64_to_cpu bswap_64
#else
#define le64_to_cpu
#endif

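/*
 * Per-session decoder state.  An instance is allocated in
 * intel_bts_process_auxtrace_info() and hooked into the session via the
 * embedded struct auxtrace callbacks.  It owns the auxtrace queues, the
 * heap used to order queues by timestamp, the TSC-to-perf-time conversion
 * parameters and the options controlling which samples are synthesized.
 */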
struct intel_bts {
	struct auxtrace			auxtrace;
	struct auxtrace_queues		queues;
	struct auxtrace_heap		heap;
	u32				auxtrace_type;
	struct perf_session		*session;
	struct machine			*machine;
	bool				sampling_mode;
	bool				snapshot_mode;
	bool				data_queued;
	u32				pmu_type;
	struct perf_tsc_conversion	tc;
	bool				cap_user_time_zero;
	struct itrace_synth_opts	synth_opts;
	bool				sample_branches;
	u32				branches_filter;
	u64				branches_sample_type;
	u64				branches_id;
	size_t				branches_event_size;
	bool				synth_needs_swap;
	unsigned long			num_events;
};

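/*
 * Per-queue decode state (there is typically one queue per cpu, or per
 * thread when the trace was not recorded per-cpu): the buffer currently
 * being processed, the pid/tid/cpu the data belongs to, and the flags and
 * decoded instruction of the branch most recently handed to the sample
 * synthesis code.
 */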
struct intel_bts_queue {
	struct intel_bts	*bts;
	unsigned int		queue_nr;
	struct auxtrace_buffer	*buffer;
	bool			on_heap;
	bool			done;
	pid_t			pid;
	pid_t			tid;
	int			cpu;
	u64			time;
	struct intel_pt_insn	intel_pt_insn;
	u32			sample_flags;
};

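/*
 * Layout of one BTS record: a from/to address pair plus a misc word.  The
 * fields are little-endian 64-bit values (hence le64_to_cpu() above), and
 * bit 4 of 'misc' is used by the dump code below to distinguish predicted
 * from mispredicted branches.
 */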
struct branch {
	u64 from;
	u64 to;
	u64 misc;
};

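/* Hex-dump raw BTS data, decoding whole records as "from -> to" pairs. */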
static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
			   unsigned char *buf, size_t len)
{
	struct branch *branch;
	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel BTS data: size %zu bytes\n",
		      len);

	while (len) {
		if (len >= br_sz)
			sz = br_sz;
		else
			sz = len;
		printf(".");
		color_fprintf(stdout, color, "  %08zx: ", pos);
		for (i = 0; i < sz; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < br_sz; i++)
			color_fprintf(stdout, color, "   ");
		if (len >= br_sz) {
			branch = (struct branch *)buf;
			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
				      le64_to_cpu(branch->from),
				      le64_to_cpu(branch->to),
				      le64_to_cpu(branch->misc) & 0x10 ?
							"pred" : "miss");
		} else {
			color_fprintf(stdout, color, " Bad record!\n");
		}
		pos += sz;
		buf += sz;
		len -= sz;
	}
}

static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
				 size_t len)
{
	printf(".\n");
	intel_bts_dump(bts, buf, len);
}

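/* Synthesize and deliver an error event reporting lost trace data. */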
static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
			     sample->tid, 0, "Lost trace data");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
						     unsigned int queue_nr)
{
	struct intel_bts_queue *btsq;

	btsq = zalloc(sizeof(struct intel_bts_queue));
	if (!btsq)
		return NULL;

	btsq->bts = bts;
	btsq->queue_nr = queue_nr;
	btsq->pid = -1;
	btsq->tid = -1;
	btsq->cpu = -1;

	return btsq;
}

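/*
 * Lazily allocate the per-queue state and, when not in sampling mode, put
 * the queue on the heap keyed by the timestamp of its first buffer so that
 * queues are processed in time order.
 */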
static int intel_bts_setup_queue(struct intel_bts *bts,
				 struct auxtrace_queue *queue,
				 unsigned int queue_nr)
{
	struct intel_bts_queue *btsq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!btsq) {
		btsq = intel_bts_alloc_queue(bts, queue_nr);
		if (!btsq)
			return -ENOMEM;
		queue->priv = btsq;

		if (queue->cpu != -1)
			btsq->cpu = queue->cpu;
		btsq->tid = queue->tid;
	}

	if (bts->sampling_mode)
		return 0;

	if (!btsq->on_heap && !btsq->buffer) {
		int ret;

		btsq->buffer = auxtrace_buffer__next(queue, NULL);
		if (!btsq->buffer)
			return 0;

		ret = auxtrace_heap__add(&bts->heap, queue_nr,
					 btsq->buffer->reference);
		if (ret)
			return ret;
		btsq->on_heap = true;
	}

	return 0;
}

static int intel_bts_setup_queues(struct intel_bts *bts)
{
	unsigned int i;
	int ret;

	for (i = 0; i < bts->queues.nr_queues; i++) {
		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
					    i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline int intel_bts_update_queues(struct intel_bts *bts)
{
	if (bts->queues.new_data) {
		bts->queues.new_data = false;
		return intel_bts_setup_queues(bts);
	}
	return 0;
}

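/*
 * In snapshot mode consecutive snapshots can overlap.  Find the point in
 * buffer 'b' where the data stops being a repeat of the tail of the
 * previous buffer 'a', comparing whole records at a time.
 */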
static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
					     unsigned char *buf_b, size_t len_b)
{
	size_t offs, len;

	if (len_a > len_b)
		offs = len_a - len_b;
	else
		offs = 0;

	for (; offs < len_a; offs += sizeof(struct branch)) {
		len = len_a - offs;
		if (!memcmp(buf_a + offs, buf_b, len))
			return buf_b + len;
	}

	return buf_b;
}

static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
				    struct auxtrace_buffer *b)
{
	struct auxtrace_buffer *a;
	void *start;

	if (b->list.prev == &queue->head)
		return 0;
	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	return 0;
}

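/*
 * Turn one BTS record into a synthesized PERF_RECORD_SAMPLE branch sample
 * and deliver it to the session.  When samples are being re-injected into
 * the output, the sample data is also packed back into the event itself.
 */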
static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
					 struct branch *branch)
{
	int ret;
	struct intel_bts *bts = btsq->bts;
	union perf_event event;
	struct perf_sample sample = { .ip = 0, };

	if (bts->synth_opts.initial_skip &&
	    bts->num_events++ <= bts->synth_opts.initial_skip)
		return 0;

	event.sample.header.type = PERF_RECORD_SAMPLE;
	event.sample.header.misc = PERF_RECORD_MISC_USER;
	event.sample.header.size = sizeof(struct perf_event_header);

	sample.cpumode = PERF_RECORD_MISC_USER;
	sample.ip = le64_to_cpu(branch->from);
	sample.pid = btsq->pid;
	sample.tid = btsq->tid;
	sample.addr = le64_to_cpu(branch->to);
	sample.id = btsq->bts->branches_id;
	sample.stream_id = btsq->bts->branches_id;
	sample.period = 1;
	sample.cpu = btsq->cpu;
	sample.flags = btsq->sample_flags;
	sample.insn_len = btsq->intel_pt_insn.length;
	memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);

	if (bts->synth_opts.inject) {
		event.sample.header.size = bts->branches_event_size;
		ret = perf_event__synthesize_sample(&event,
						    bts->branches_sample_type,
						    0, &sample,
						    bts->synth_needs_swap);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
	if (ret)
		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
		       ret);

	return ret;
}

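/*
 * Read the instruction bytes at the branch 'from' address from the mapped
 * DSO and decode them, so that the synthesized sample can carry the branch
 * instruction's length and opcode classification.
 */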
static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
{
	struct machine *machine = btsq->bts->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	uint8_t cpumode;
	int err = -1;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = machine__find_thread(machine, -1, btsq->tid);
	if (!thread)
		return -1;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
	if (!al.map || !al.map->dso)
		goto out_put;

	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
				  INTEL_PT_INSN_BUF_SZ);
	if (len <= 0)
		goto out_put;

	/* Load maps to ensure dso->is_64_bit has been updated */
	map__load(al.map);

	x86_64 = al.map->dso->is_64_bit;

	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
		goto out_put;

	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
				 pid_t tid, u64 ip)
{
	union perf_event event;
	int err;

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
			     "Failed to get instruction");

	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
	if (err)
		pr_err("Intel BTS: failed to deliver error event, error %d\n",
		       err);

	return err;
}

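/*
 * Classify a BTS record: a zero 'from' marks the start of a trace, a zero
 * 'to' marks the end, and otherwise the flags come from decoding the
 * branch instruction itself, with a special case for asynchronous branches
 * (e.g. interrupts) into the kernel.
 */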
static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
				     struct branch *branch)
{
	int err;

	if (!branch->from) {
		if (branch->to)
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_TRACE_BEGIN;
		else
			btsq->sample_flags = 0;
		btsq->intel_pt_insn.length = 0;
	} else if (!branch->to) {
		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		btsq->intel_pt_insn.length = 0;
	} else {
		err = intel_bts_get_next_insn(btsq, branch->from);
		if (err) {
			btsq->sample_flags = 0;
			btsq->intel_pt_insn.length = 0;
			if (!btsq->bts->synth_opts.errors)
				return 0;
			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
						    btsq->pid, btsq->tid,
						    branch->from);
			return err;
		}
		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
		/* Check for an async branch into the kernel */
		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_CALL |
					   PERF_IP_FLAG_SYSCALLRET))
			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_CALL |
					     PERF_IP_FLAG_ASYNC |
					     PERF_IP_FLAG_INTERRUPT;
	}

	return 0;
}

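/*
 * Walk the records in one AUX buffer: classify each branch, feed it to the
 * thread stack if requested, and synthesize a branch sample unless it is
 * filtered out by the calls/returns filter.
 */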
static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
				    struct auxtrace_buffer *buffer,
				    struct thread *thread)
{
	struct branch *branch;
	size_t sz, bsz = sizeof(struct branch);
	u32 filter = btsq->bts->branches_filter;
	int err = 0;

	if (buffer->use_data) {
		sz = buffer->use_size;
		branch = buffer->use_data;
	} else {
		sz = buffer->size;
		branch = buffer->data;
	}

	if (!btsq->bts->sample_branches)
		return 0;

	for (; sz > bsz; branch += 1, sz -= bsz) {
		if (!branch->from && !branch->to)
			continue;
		intel_bts_get_branch_type(btsq, branch);
		if (btsq->bts->synth_opts.thread_stack)
			thread_stack__event(thread, btsq->sample_flags,
					    le64_to_cpu(branch->from),
					    le64_to_cpu(branch->to),
					    btsq->intel_pt_insn.length,
					    buffer->buffer_nr + 1);
		if (filter && !(filter & btsq->sample_flags))
			continue;
		err = intel_bts_synth_branch_sample(btsq, branch);
		if (err)
			break;
	}
	return err;
}

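/*
 * Process the next buffer on a queue: resolve the thread, map in the data,
 * fix up snapshot overlaps, decode the records and then queue up the
 * following buffer.  Returns 1 when the queue has no more data.
 */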
static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
{
	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
	struct auxtrace_queue *queue;
	struct thread *thread;
	int err;

	if (btsq->done)
		return 1;

	if (btsq->pid == -1) {
		thread = machine__find_thread(btsq->bts->machine, -1,
					      btsq->tid);
		if (thread)
			btsq->pid = thread->pid_;
	} else {
		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
						 btsq->tid);
	}

	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];

	if (!buffer)
		buffer = auxtrace_buffer__next(queue, NULL);

	if (!buffer) {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
		err = 1;
		goto out_put;
	}

	/* Currently there is no support for split buffers */
	if (buffer->consecutive) {
		err = -EINVAL;
		goto out_put;
	}

	if (!buffer->data) {
		int fd = perf_data_file__fd(btsq->bts->session->file);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data) {
			err = -ENOMEM;
			goto out_put;
		}
	}

	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
	    intel_bts_do_fix_overlap(queue, buffer)) {
		err = -ENOMEM;
		goto out_put;
	}

	if (!btsq->bts->synth_opts.callchain &&
	    !btsq->bts->synth_opts.thread_stack && thread &&
	    (!old_buffer || btsq->bts->sampling_mode ||
	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);

	err = intel_bts_process_buffer(btsq, buffer, thread);

	auxtrace_buffer__drop_data(buffer);

	btsq->buffer = auxtrace_buffer__next(queue, buffer);
	if (btsq->buffer) {
		if (timestamp)
			*timestamp = btsq->buffer->reference;
	} else {
		if (!btsq->bts->sampling_mode)
			btsq->done = 1;
	}
out_put:
	thread__put(thread);
	return err;
}

static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
{
	u64 ts = 0;
	int ret;

	while (1) {
		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0)
			return ret;
		if (ret)
			break;
	}
	return 0;
}

static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
{
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
		struct intel_bts_queue *btsq = queue->priv;

		if (btsq && btsq->tid == tid)
			return intel_bts_flush_queue(btsq);
	}
	return 0;
}

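/*
 * Drain the heap: repeatedly take the queue with the earliest pending
 * timestamp, process one buffer from it and re-add it keyed by its next
 * buffer's timestamp, stopping once the earliest entry is newer than
 * 'timestamp'.
 */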
static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
{
	while (1) {
		unsigned int queue_nr;
		struct auxtrace_queue *queue;
		struct intel_bts_queue *btsq;
		u64 ts = 0;
		int ret;

		if (!bts->heap.heap_cnt)
			return 0;

		if (bts->heap.heap_array[0].ordinal > timestamp)
			return 0;

		queue_nr = bts->heap.heap_array[0].queue_nr;
		queue = &bts->queues.queue_array[queue_nr];
		btsq = queue->priv;

		auxtrace_heap__pop(&bts->heap);

		ret = intel_bts_process_queue(btsq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&bts->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			btsq->on_heap = false;
		}
	}

	return 0;
}

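/*
 * Main per-event hook.  Decoding is driven from here: any queued trace up
 * to the current event's timestamp is processed before the event itself is
 * handled, so that synthesized samples stay correctly ordered.
 */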
static int intel_bts_process_event(struct perf_session *session,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	u64 timestamp;
	int err;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel BTS requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
	else
		timestamp = 0;

	err = intel_bts_update_queues(bts);
	if (err)
		return err;

	err = intel_bts_process_queues(bts, timestamp);
	if (err)
		return err;
	if (event->header.type == PERF_RECORD_EXIT) {
		err = intel_bts_process_tid_exit(bts, event->fork.tid);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    bts->synth_opts.errors)
		err = intel_bts_lost(bts, sample);

	return err;
}

static int intel_bts_process_auxtrace_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	if (bts->sampling_mode)
		return 0;

	if (!bts->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data_file__fd(session->file);
		int err;

		if (perf_data_file__is_pipe(session->file)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&bts->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_bts_dump_event(bts, buffer->data,
						     buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}

static int intel_bts_flush(struct perf_session *session,
			   struct perf_tool *tool __maybe_unused)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	int ret;

	if (dump_trace || bts->sampling_mode)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_bts_update_queues(bts);
	if (ret < 0)
		return ret;

	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
}

static void intel_bts_free_queue(void *priv)
{
	struct intel_bts_queue *btsq = priv;

	if (!btsq)
		return;
	free(btsq);
}

static void intel_bts_free_events(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);
	struct auxtrace_queues *queues = &bts->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_bts_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	auxtrace_queues__free(queues);
}

static void intel_bts_free(struct perf_session *session)
{
	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
					     auxtrace);

	auxtrace_heap__free(&bts->heap);
	intel_bts_free_events(session);
	session->auxtrace = NULL;
	free(bts);
}

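/*
 * Dummy tool used only so that perf_event__synthesize_attr() can hand the
 * synthesized attribute event back to us for delivery to the session.
 */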
struct intel_bts_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_bts_event_synth(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused,
				 struct machine *machine __maybe_unused)
{
	struct intel_bts_synth *intel_bts_synth =
			container_of(tool, struct intel_bts_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_bts_synth->session,
						 event, NULL);
}

static int intel_bts_synth_event(struct perf_session *session,
				 struct perf_event_attr *attr, u64 id)
{
	struct intel_bts_synth intel_bts_synth;

	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
	intel_bts_synth.session = session;

	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
					   &id, intel_bts_event_synth);
}

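/*
 * Set up the synthesized 'branches' event: base its attributes on the
 * evsel that carried the BTS data, give it an id well away from existing
 * ids, and remember the resulting sample type and size for later use when
 * injecting samples.
 */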
static int intel_bts_synth_events(struct intel_bts *bts,
				  struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("There are no selected events with Intel BTS data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (bts->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
			 id, (u64)attr.sample_type);
		err = intel_bts_synth_event(session, &attr, id);
		if (err) {
			pr_err("%s: failed to synthesize 'branches' event type\n",
			       __func__);
			return err;
		}
		bts->sample_branches = true;
		bts->branches_sample_type = attr.sample_type;
		bts->branches_id = id;
		/*
		 * We only use sample types from PERF_SAMPLE_MASK so we can use
		 * __perf_evsel__sample_size() here.
		 */
		bts->branches_event_size = sizeof(struct sample_event) +
				__perf_evsel__sample_size(attr.sample_type);
	}

	bts->synth_needs_swap = evsel->needs_swap;

	return 0;
}

static const char * const intel_bts_info_fmts[] = {
	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
};

static void intel_bts_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}

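/*
 * Entry point, called for the PERF_RECORD_AUXTRACE_INFO event.  A typical
 * workflow that ends up here is roughly (exact option spellings may vary
 * between perf versions):
 *
 *	perf record -e intel_bts// -- workload
 *	perf script            (or perf report)
 *
 * Validate the info event, copy the decoder parameters out of it, register
 * the auxtrace callbacks on the session, synthesize the 'branches' event
 * and queue the trace data for decoding.
 */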
int intel_bts_process_auxtrace_info(union perf_event *event,
				    struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
	struct intel_bts *bts;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	bts = zalloc(sizeof(struct intel_bts));
	if (!bts)
		return -ENOMEM;

	err = auxtrace_queues__init(&bts->queues);
	if (err)
		goto err_free;

	bts->session = session;
	bts->machine = &session->machines.host; /* No kvm support */
	bts->auxtrace_type = auxtrace_info->type;
	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
	bts->cap_user_time_zero =
			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];

	bts->sampling_mode = false;

	bts->auxtrace.process_event = intel_bts_process_event;
	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
	bts->auxtrace.flush_events = intel_bts_flush;
	bts->auxtrace.free_events = intel_bts_free_events;
	bts->auxtrace.free = intel_bts_free;
	session->auxtrace = &bts->auxtrace;

	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
			     INTEL_BTS_SNAPSHOT_MODE);

	if (dump_trace)
		return 0;

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		bts->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&bts->synth_opts);
		if (session->itrace_synth_opts)
			bts->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (bts->synth_opts.calls)
		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_TRACE_END;
	if (bts->synth_opts.returns)
		bts->branches_filter |= PERF_IP_FLAG_RETURN |
					PERF_IP_FLAG_TRACE_BEGIN;

	err = intel_bts_synth_events(bts, session);
	if (err)
		goto err_free_queues;

	err = auxtrace_queues__process_index(&bts->queues, session);
	if (err)
		goto err_free_queues;

	if (bts->queues.populated)
		bts->data_queued = true;

	return 0;

err_free_queues:
	auxtrace_queues__free(&bts->queues);
	session->auxtrace = NULL;
err_free:
	free(bts);
	return err;
}