1 /*
2  * CTF writing support via babeltrace.
3  *
4  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <linux/compiler.h>
11 #include <babeltrace/ctf-writer/writer.h>
12 #include <babeltrace/ctf-writer/clock.h>
13 #include <babeltrace/ctf-writer/stream.h>
14 #include <babeltrace/ctf-writer/event.h>
15 #include <babeltrace/ctf-writer/event-types.h>
16 #include <babeltrace/ctf-writer/event-fields.h>
17 #include <babeltrace/ctf-ir/utils.h>
18 #include <babeltrace/ctf/events.h>
19 #include <traceevent/event-parse.h>
20 #include "asm/bug.h"
21 #include "data-convert-bt.h"
22 #include "session.h"
23 #include "util.h"
24 #include "debug.h"
25 #include "tool.h"
26 #include "evlist.h"
27 #include "evsel.h"
28 #include "machine.h"
29 
30 #define pr_N(n, fmt, ...) \
31 	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
32 
33 #define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
34 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
35 
36 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
37 
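/*
 * Per-evsel private data: the CTF event class created for the evsel in
 * add_event() and looked up again in process_sample_event().
 */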
38 struct evsel_priv {
39 	struct bt_ctf_event_class *event_class;
40 };
41 
42 #define MAX_CPUS	4096
43 
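/*
 * One CTF output stream per CPU; 'count' tracks how many samples were
 * appended since the last flush (see STREAM_FLUSH_COUNT below).
 */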
44 struct ctf_stream {
45 	struct bt_ctf_stream *stream;
46 	int cpu;
47 	u32 count;
48 };
49 
50 struct ctf_writer {
51 	/* writer primitives */
52 	struct bt_ctf_writer		 *writer;
53 	struct ctf_stream		**stream;
54 	int				  stream_cnt;
55 	struct bt_ctf_stream_class	 *stream_class;
56 	struct bt_ctf_clock		 *clock;
57 
58 	/* data types */
59 	union {
60 		struct {
61 			struct bt_ctf_field_type	*s64;
62 			struct bt_ctf_field_type	*u64;
63 			struct bt_ctf_field_type	*s32;
64 			struct bt_ctf_field_type	*u32;
65 			struct bt_ctf_field_type	*string;
66 			struct bt_ctf_field_type	*u64_hex;
67 		};
68 		struct bt_ctf_field_type *array[6];
69 	} data;
70 };
71 
72 struct convert {
73 	struct perf_tool	tool;
74 	struct ctf_writer	writer;
75 
76 	u64			events_size;
77 	u64			events_count;
78 
79 	/* Ordered events configured queue size. */
80 	u64			queue_size;
81 };
82 
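/*
 * Create an integer field of the given type, set it to 'val' (as signed
 * or unsigned, depending on the type) and attach it to the event payload
 * under 'name'.
 */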
83 static int value_set(struct bt_ctf_field_type *type,
84 		     struct bt_ctf_event *event,
85 		     const char *name, u64 val)
86 {
87 	struct bt_ctf_field *field;
88 	bool sign = bt_ctf_field_type_integer_get_signed(type);
89 	int ret;
90 
91 	field = bt_ctf_field_create(type);
92 	if (!field) {
93 		pr_err("failed to create a field %s\n", name);
94 		return -1;
95 	}
96 
97 	if (sign) {
98 		ret = bt_ctf_field_signed_integer_set_value(field, val);
99 		if (ret) {
100 			pr_err("failed to set field value %s\n", name);
101 			goto err;
102 		}
103 	} else {
104 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
105 		if (ret) {
106 			pr_err("failed to set field value %s\n", name);
107 			goto err;
108 		}
109 	}
110 
111 	ret = bt_ctf_event_set_payload(event, name, field);
112 	if (ret) {
113 		pr_err("failed to set payload %s\n", name);
114 		goto err;
115 	}
116 
117 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
118 
119 err:
120 	bt_ctf_field_put(field);
121 	return ret;
122 }
123 
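/*
 * Generate the value_set_s32/u32/s64/u64/u64_hex helpers, which pick the
 * matching field type from cw->data and forward to value_set().
 */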
124 #define __FUNC_VALUE_SET(_name, _val_type)				\
125 static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
126 			     struct bt_ctf_event *event,		\
127 			     const char *name,				\
128 			     _val_type val)				\
129 {									\
130 	struct bt_ctf_field_type *type = cw->data._name;		\
131 	return value_set(type, event, name, (u64) val);			\
132 }
133 
134 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
135 
136 FUNC_VALUE_SET(s32)
137 FUNC_VALUE_SET(u32)
138 FUNC_VALUE_SET(s64)
139 FUNC_VALUE_SET(u64)
140 __FUNC_VALUE_SET(u64_hex, u64)
141 
142 static struct bt_ctf_field_type*
143 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
144 {
145 	unsigned long flags = field->flags;
146 
147 	if (flags & FIELD_IS_STRING)
148 		return cw->data.string;
149 
150 	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned long values are mostly pointers */
152 		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
153 			return cw->data.u64_hex;
154 	}
155 
156 	if (flags & FIELD_IS_SIGNED) {
157 		if (field->size == 8)
158 			return cw->data.s64;
159 		else
160 			return cw->data.s32;
161 	}
162 
163 	if (field->size == 8)
164 		return cw->data.u64;
165 	else
166 		return cw->data.u32;
167 }
168 
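/*
 * Sign-extend a value read from a tracepoint field narrower than 64 bits,
 * so that e.g. adjust_signedness(0xff, 1) yields 0xffffffffffffffffULL
 * (-1) rather than 255.
 */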
169 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
170 {
171 	unsigned long long value_mask;
172 
	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * The masks are spelled out explicitly for readability.
	 */
177 	switch (size) {
178 	case 1:
179 		value_mask = 0x7fULL;
180 		break;
181 	case 2:
182 		value_mask = 0x7fffULL;
183 		break;
184 	case 4:
185 		value_mask = 0x7fffffffULL;
186 		break;
187 	case 8:
		/*
		 * For a 64 bit value, return it as is. There is no
		 * need to fill the high bits.
		 */
192 		/* Fall through */
193 	default:
194 		/* BUG! */
195 		return value_int;
196 	}
197 
198 	/* If it is a positive value, don't adjust. */
199 	if ((value_int & (~0ULL - value_mask)) == 0)
200 		return value_int;
201 
	/* Fill the upper bits of value_int with ones to make it a negative long long. */
203 	return (value_int & value_mask) | ~value_mask;
204 }
205 
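/*
 * Convert one tracepoint field of the sample's raw data into a CTF
 * payload field.  Arrays are converted element by element, strings are
 * written as a single string field, and dynamic (__data_loc) fields have
 * their real offset and length decoded first.
 */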
206 static int add_tracepoint_field_value(struct ctf_writer *cw,
207 				      struct bt_ctf_event_class *event_class,
208 				      struct bt_ctf_event *event,
209 				      struct perf_sample *sample,
210 				      struct format_field *fmtf)
211 {
212 	struct bt_ctf_field_type *type;
213 	struct bt_ctf_field *array_field;
214 	struct bt_ctf_field *field;
215 	const char *name = fmtf->name;
216 	void *data = sample->raw_data;
217 	unsigned long flags = fmtf->flags;
218 	unsigned int n_items;
219 	unsigned int i;
220 	unsigned int offset;
221 	unsigned int len;
222 	int ret;
223 
224 	name = fmtf->alias;
225 	offset = fmtf->offset;
226 	len = fmtf->size;
227 	if (flags & FIELD_IS_STRING)
228 		flags &= ~FIELD_IS_ARRAY;
229 
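	/*
	 * Dynamic (__data_loc) fields store a descriptor in the record:
	 * the length of the real data in the upper 16 bits and its
	 * offset in the lower 16 bits.
	 */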
230 	if (flags & FIELD_IS_DYNAMIC) {
231 		unsigned long long tmp_val;
232 
233 		tmp_val = pevent_read_number(fmtf->event->pevent,
234 				data + offset, len);
235 		offset = tmp_val;
236 		len = offset >> 16;
237 		offset &= 0xffff;
238 	}
239 
240 	if (flags & FIELD_IS_ARRAY) {
241 
242 		type = bt_ctf_event_class_get_field_by_name(
243 				event_class, name);
244 		array_field = bt_ctf_field_create(type);
245 		bt_ctf_field_type_put(type);
246 		if (!array_field) {
247 			pr_err("Failed to create array type %s\n", name);
248 			return -1;
249 		}
250 
251 		len = fmtf->size / fmtf->arraylen;
252 		n_items = fmtf->arraylen;
253 	} else {
254 		n_items = 1;
255 		array_field = NULL;
256 	}
257 
258 	type = get_tracepoint_field_type(cw, fmtf);
259 
260 	for (i = 0; i < n_items; i++) {
261 		if (flags & FIELD_IS_ARRAY)
262 			field = bt_ctf_field_array_get_field(array_field, i);
263 		else
264 			field = bt_ctf_field_create(type);
265 
266 		if (!field) {
267 			pr_err("failed to create a field %s\n", name);
268 			return -1;
269 		}
270 
271 		if (flags & FIELD_IS_STRING)
272 			ret = bt_ctf_field_string_set_value(field,
273 					data + offset + i * len);
274 		else {
275 			unsigned long long value_int;
276 
277 			value_int = pevent_read_number(
278 					fmtf->event->pevent,
279 					data + offset + i * len, len);
280 
281 			if (!(flags & FIELD_IS_SIGNED))
282 				ret = bt_ctf_field_unsigned_integer_set_value(
283 						field, value_int);
284 			else
285 				ret = bt_ctf_field_signed_integer_set_value(
286 						field, adjust_signedness(value_int, len));
287 		}
288 
289 		if (ret) {
			pr_err("failed to set field value %s\n", name);
291 			goto err_put_field;
292 		}
293 		if (!(flags & FIELD_IS_ARRAY)) {
294 			ret = bt_ctf_event_set_payload(event, name, field);
295 			if (ret) {
296 				pr_err("failed to set payload %s\n", name);
297 				goto err_put_field;
298 			}
299 		}
300 		bt_ctf_field_put(field);
301 	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		bt_ctf_field_put(array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
	}
310 	return 0;
311 
312 err_put_field:
313 	bt_ctf_field_put(field);
314 	return -1;
315 }
316 
317 static int add_tracepoint_fields_values(struct ctf_writer *cw,
318 					struct bt_ctf_event_class *event_class,
319 					struct bt_ctf_event *event,
320 					struct format_field *fields,
321 					struct perf_sample *sample)
322 {
323 	struct format_field *field;
324 	int ret;
325 
326 	for (field = fields; field; field = field->next) {
327 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
328 				field);
329 		if (ret)
330 			return -1;
331 	}
332 	return 0;
333 }
334 
335 static int add_tracepoint_values(struct ctf_writer *cw,
336 				 struct bt_ctf_event_class *event_class,
337 				 struct bt_ctf_event *event,
338 				 struct perf_evsel *evsel,
339 				 struct perf_sample *sample)
340 {
341 	struct format_field *common_fields = evsel->tp_format->format.common_fields;
342 	struct format_field *fields        = evsel->tp_format->format.fields;
343 	int ret;
344 
345 	ret = add_tracepoint_fields_values(cw, event_class, event,
346 					   common_fields, sample);
347 	if (!ret)
348 		ret = add_tracepoint_fields_values(cw, event_class, event,
349 						   fields, sample);
350 
351 	return ret;
352 }
353 
354 static int add_generic_values(struct ctf_writer *cw,
355 			      struct bt_ctf_event *event,
356 			      struct perf_evsel *evsel,
357 			      struct perf_sample *sample)
358 {
359 	u64 type = evsel->attr.sample_type;
360 	int ret;
361 
362 	/*
363 	 * missing:
364 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
365 	 *                              ctf event header
366 	 *   PERF_SAMPLE_READ         - TODO
367 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
368 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
369 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
370 	 *   PERF_SAMPLE_REGS_USER    - TODO
371 	 *   PERF_SAMPLE_STACK_USER   - TODO
372 	 */
373 
374 	if (type & PERF_SAMPLE_IP) {
375 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
376 		if (ret)
377 			return -1;
378 	}
379 
380 	if (type & PERF_SAMPLE_TID) {
381 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
382 		if (ret)
383 			return -1;
384 
385 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
386 		if (ret)
387 			return -1;
388 	}
389 
390 	if ((type & PERF_SAMPLE_ID) ||
391 	    (type & PERF_SAMPLE_IDENTIFIER)) {
392 		ret = value_set_u64(cw, event, "perf_id", sample->id);
393 		if (ret)
394 			return -1;
395 	}
396 
397 	if (type & PERF_SAMPLE_STREAM_ID) {
398 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
399 		if (ret)
400 			return -1;
401 	}
402 
403 	if (type & PERF_SAMPLE_PERIOD) {
404 		ret = value_set_u64(cw, event, "perf_period", sample->period);
405 		if (ret)
406 			return -1;
407 	}
408 
409 	if (type & PERF_SAMPLE_WEIGHT) {
410 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
411 		if (ret)
412 			return -1;
413 	}
414 
415 	if (type & PERF_SAMPLE_DATA_SRC) {
416 		ret = value_set_u64(cw, event, "perf_data_src",
417 				sample->data_src);
418 		if (ret)
419 			return -1;
420 	}
421 
422 	if (type & PERF_SAMPLE_TRANSACTION) {
423 		ret = value_set_u64(cw, event, "perf_transaction",
424 				sample->transaction);
425 		if (ret)
426 			return -1;
427 	}
428 
429 	return 0;
430 }
431 
432 static int ctf_stream__flush(struct ctf_stream *cs)
433 {
434 	int err = 0;
435 
436 	if (cs) {
437 		err = bt_ctf_stream_flush(cs->stream);
438 		if (err)
439 			pr_err("CTF stream %d flush failed\n", cs->cpu);
440 
441 		pr("Flush stream for cpu %d (%u samples)\n",
442 		   cs->cpu, cs->count);
443 
444 		cs->count = 0;
445 	}
446 
447 	return err;
448 }
449 
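/*
 * Create a CTF stream for the given CPU and record the CPU number in the
 * stream's packet context (the "cpu_id" field added in ctf_writer__init()).
 */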
450 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
451 {
452 	struct ctf_stream *cs;
453 	struct bt_ctf_field *pkt_ctx   = NULL;
454 	struct bt_ctf_field *cpu_field = NULL;
455 	struct bt_ctf_stream *stream   = NULL;
456 	int ret;
457 
458 	cs = zalloc(sizeof(*cs));
459 	if (!cs) {
460 		pr_err("Failed to allocate ctf stream\n");
461 		return NULL;
462 	}
463 
464 	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
465 	if (!stream) {
466 		pr_err("Failed to create CTF stream\n");
467 		goto out;
468 	}
469 
470 	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
471 	if (!pkt_ctx) {
472 		pr_err("Failed to obtain packet context\n");
473 		goto out;
474 	}
475 
476 	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
477 	bt_ctf_field_put(pkt_ctx);
478 	if (!cpu_field) {
479 		pr_err("Failed to obtain cpu field\n");
480 		goto out;
481 	}
482 
483 	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
484 	if (ret) {
485 		pr_err("Failed to update CPU number\n");
486 		goto out;
487 	}
488 
489 	bt_ctf_field_put(cpu_field);
490 
491 	cs->cpu    = cpu;
492 	cs->stream = stream;
493 	return cs;
494 
495 out:
496 	if (cpu_field)
497 		bt_ctf_field_put(cpu_field);
498 	if (stream)
499 		bt_ctf_stream_put(stream);
500 
501 	free(cs);
502 	return NULL;
503 }
504 
505 static void ctf_stream__delete(struct ctf_stream *cs)
506 {
507 	if (cs) {
508 		bt_ctf_stream_put(cs->stream);
509 		free(cs);
510 	}
511 }
512 
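/* Return the stream for 'cpu', creating it lazily on first use. */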
513 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
514 {
515 	struct ctf_stream *cs = cw->stream[cpu];
516 
517 	if (!cs) {
518 		cs = ctf_stream__create(cw, cpu);
519 		cw->stream[cpu] = cs;
520 	}
521 
522 	return cs;
523 }
524 
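/*
 * Map a sample to a stream index: use the sample's CPU if it was
 * recorded (PERF_SAMPLE_CPU), otherwise use stream 0.  Out-of-range
 * CPU numbers also fall back to stream 0.
 */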
525 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
526 			  struct perf_evsel *evsel)
527 {
528 	int cpu = 0;
529 
530 	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
531 		cpu = sample->cpu;
532 
	if (cpu >= cw->stream_cnt) {
534 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
535 			cpu, cw->stream_cnt);
536 		cpu = 0;
537 	}
538 
539 	return cpu;
540 }
541 
542 #define STREAM_FLUSH_COUNT 100000
543 
/*
 * Currently we have no better way to determine when a stream
 * should be flushed than to keep track of the number of appended
 * events and check it against a threshold.
 */
550 static bool is_flush_needed(struct ctf_stream *cs)
551 {
552 	return cs->count >= STREAM_FLUSH_COUNT;
553 }
554 
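/*
 * Convert one perf sample into a CTF event and append it to the per-CPU
 * stream, flushing the stream first once it holds STREAM_FLUSH_COUNT
 * events.
 */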
555 static int process_sample_event(struct perf_tool *tool,
556 				union perf_event *_event __maybe_unused,
557 				struct perf_sample *sample,
558 				struct perf_evsel *evsel,
559 				struct machine *machine __maybe_unused)
560 {
561 	struct convert *c = container_of(tool, struct convert, tool);
562 	struct evsel_priv *priv = evsel->priv;
563 	struct ctf_writer *cw = &c->writer;
564 	struct ctf_stream *cs;
565 	struct bt_ctf_event_class *event_class;
566 	struct bt_ctf_event *event;
567 	int ret;
568 
569 	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
570 		return 0;
571 
572 	event_class = priv->event_class;
573 
574 	/* update stats */
575 	c->events_count++;
576 	c->events_size += _event->header.size;
577 
578 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
579 
580 	event = bt_ctf_event_create(event_class);
581 	if (!event) {
		pr_err("Failed to create a CTF event\n");
583 		return -1;
584 	}
585 
586 	bt_ctf_clock_set_time(cw->clock, sample->time);
587 
588 	ret = add_generic_values(cw, event, evsel, sample);
589 	if (ret)
590 		return -1;
591 
592 	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
593 		ret = add_tracepoint_values(cw, event_class, event,
594 					    evsel, sample);
595 		if (ret)
596 			return -1;
597 	}
598 
599 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
600 	if (cs) {
601 		if (is_flush_needed(cs))
602 			ctf_stream__flush(cs);
603 
604 		cs->count++;
605 		bt_ctf_stream_append_event(cs->stream, event);
606 	}
607 
608 	bt_ctf_event_put(event);
609 	return cs ? 0 : -1;
610 }
611 
612 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
613 static char *change_name(char *name, char *orig_name, int dup)
614 {
615 	char *new_name = NULL;
616 	size_t len;
617 
618 	if (!name)
619 		name = orig_name;
620 
621 	if (dup >= 10)
622 		goto out;
	/*
	 * Add a '_' prefix to a potential keyword.  According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updates may require us to use '$'.
	 */
628 	if (dup < 0)
629 		len = strlen(name) + sizeof("_");
630 	else
631 		len = strlen(orig_name) + sizeof("_dupl_X");
632 
633 	new_name = malloc(len);
634 	if (!new_name)
635 		goto out;
636 
637 	if (dup < 0)
638 		snprintf(new_name, len, "_%s", name);
639 	else
640 		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
641 
642 out:
643 	if (name != orig_name)
644 		free(name);
645 	return new_name;
646 }
647 
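/*
 * Add a tracepoint field to the event class under a CTF-safe name:
 * invalid identifiers get a '_' prefix and duplicate names get a
 * "_dupl_<n>" suffix.  The chosen name is cached in field->alias.
 */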
648 static int event_class_add_field(struct bt_ctf_event_class *event_class,
649 		struct bt_ctf_field_type *type,
650 		struct format_field *field)
651 {
652 	struct bt_ctf_field_type *t = NULL;
653 	char *name;
654 	int dup = 1;
655 	int ret;
656 
657 	/* alias was already assigned */
658 	if (field->alias != field->name)
659 		return bt_ctf_event_class_add_field(event_class, type,
660 				(char *)field->alias);
661 
662 	name = field->name;
663 
	/* If 'name' is a keyword, add a prefix. */
665 	if (bt_ctf_validate_identifier(name))
666 		name = change_name(name, field->name, -1);
667 
668 	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
670 		return -1;
671 	}
672 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
673 		bt_ctf_field_type_put(t);
674 		name = change_name(name, field->name, dup++);
675 		if (!name) {
676 			pr_err("Failed to create dup name for '%s'\n", field->name);
677 			return -1;
678 		}
679 	}
680 
681 	ret = bt_ctf_event_class_add_field(event_class, type, name);
682 	if (!ret)
683 		field->alias = name;
684 
685 	return ret;
686 }
687 
688 static int add_tracepoint_fields_types(struct ctf_writer *cw,
689 				       struct format_field *fields,
690 				       struct bt_ctf_event_class *event_class)
691 {
692 	struct format_field *field;
693 	int ret;
694 
695 	for (field = fields; field; field = field->next) {
696 		struct bt_ctf_field_type *type;
697 		unsigned long flags = field->flags;
698 
699 		pr2("  field '%s'\n", field->name);
700 
701 		type = get_tracepoint_field_type(cw, field);
702 		if (!type)
703 			return -1;
704 
705 		/*
706 		 * A string is an array of chars. For this we use the string
707 		 * type and don't care that it is an array. What we don't
708 		 * support is an array of strings.
709 		 */
710 		if (flags & FIELD_IS_STRING)
711 			flags &= ~FIELD_IS_ARRAY;
712 
713 		if (flags & FIELD_IS_ARRAY)
714 			type = bt_ctf_field_type_array_create(type, field->arraylen);
715 
716 		ret = event_class_add_field(event_class, type, field);
717 
718 		if (flags & FIELD_IS_ARRAY)
719 			bt_ctf_field_type_put(type);
720 
721 		if (ret) {
722 			pr_err("Failed to add field '%s': %d\n",
723 					field->name, ret);
724 			return -1;
725 		}
726 	}
727 
728 	return 0;
729 }
730 
731 static int add_tracepoint_types(struct ctf_writer *cw,
732 				struct perf_evsel *evsel,
733 				struct bt_ctf_event_class *class)
734 {
735 	struct format_field *common_fields = evsel->tp_format->format.common_fields;
736 	struct format_field *fields        = evsel->tp_format->format.fields;
737 	int ret;
738 
739 	ret = add_tracepoint_fields_types(cw, common_fields, class);
740 	if (!ret)
741 		ret = add_tracepoint_fields_types(cw, fields, class);
742 
743 	return ret;
744 }
745 
746 static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
747 			     struct bt_ctf_event_class *event_class)
748 {
749 	u64 type = evsel->attr.sample_type;
750 
751 	/*
752 	 * missing:
753 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
754 	 *                              ctf event header
755 	 *   PERF_SAMPLE_READ         - TODO
756 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
757 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
758 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
759 	 *   PERF_SAMPLE_REGS_USER    - TODO
760 	 *   PERF_SAMPLE_STACK_USER   - TODO
761 	 */
762 
763 #define ADD_FIELD(cl, t, n)						\
764 	do {								\
765 		pr2("  field '%s'\n", n);				\
766 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
768 			return -1;					\
769 		}							\
770 	} while (0)
771 
772 	if (type & PERF_SAMPLE_IP)
773 		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
774 
775 	if (type & PERF_SAMPLE_TID) {
776 		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
777 		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
778 	}
779 
780 	if ((type & PERF_SAMPLE_ID) ||
781 	    (type & PERF_SAMPLE_IDENTIFIER))
782 		ADD_FIELD(event_class, cw->data.u64, "perf_id");
783 
784 	if (type & PERF_SAMPLE_STREAM_ID)
785 		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
786 
787 	if (type & PERF_SAMPLE_PERIOD)
788 		ADD_FIELD(event_class, cw->data.u64, "perf_period");
789 
790 	if (type & PERF_SAMPLE_WEIGHT)
791 		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
792 
793 	if (type & PERF_SAMPLE_DATA_SRC)
794 		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
795 
796 	if (type & PERF_SAMPLE_TRANSACTION)
797 		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
798 
799 #undef ADD_FIELD
800 	return 0;
801 }
802 
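/*
 * Create a CTF event class for the evsel, populate it with the generic
 * perf sample fields and (for tracepoints) the tracepoint fields, and
 * register it with the stream class.
 */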
803 static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
804 {
805 	struct bt_ctf_event_class *event_class;
806 	struct evsel_priv *priv;
807 	const char *name = perf_evsel__name(evsel);
808 	int ret;
809 
810 	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
811 
812 	event_class = bt_ctf_event_class_create(name);
813 	if (!event_class)
814 		return -1;
815 
816 	ret = add_generic_types(cw, evsel, event_class);
817 	if (ret)
818 		goto err;
819 
820 	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
821 		ret = add_tracepoint_types(cw, evsel, event_class);
822 		if (ret)
823 			goto err;
824 	}
825 
826 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
827 	if (ret) {
		pr("Failed to add event class to the stream class.\n");
829 		goto err;
830 	}
831 
832 	priv = malloc(sizeof(*priv));
833 	if (!priv)
834 		goto err;
835 
836 	priv->event_class = event_class;
837 	evsel->priv       = priv;
838 	return 0;
839 
840 err:
841 	bt_ctf_event_class_put(event_class);
842 	pr_err("Failed to add event '%s'.\n", name);
843 	return -1;
844 }
845 
846 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
847 {
848 	struct perf_evlist *evlist = session->evlist;
849 	struct perf_evsel *evsel;
850 	int ret;
851 
852 	evlist__for_each(evlist, evsel) {
853 		ret = add_event(cw, evsel);
854 		if (ret)
855 			return ret;
856 	}
857 	return 0;
858 }
859 
860 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
861 {
862 	struct ctf_stream **stream;
863 	struct perf_header *ph = &session->header;
864 	int ncpus;
865 
	/*
	 * Try to get the number of cpus used in the data file.
	 * If it is not present, fall back to MAX_CPUS.
	 */
870 	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
871 
872 	stream = zalloc(sizeof(*stream) * ncpus);
873 	if (!stream) {
874 		pr_err("Failed to allocate streams.\n");
875 		return -ENOMEM;
876 	}
877 
878 	cw->stream     = stream;
879 	cw->stream_cnt = ncpus;
880 	return 0;
881 }
882 
883 static void free_streams(struct ctf_writer *cw)
884 {
885 	int cpu;
886 
887 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
888 		ctf_stream__delete(cw->stream[cpu]);
889 
890 	free(cw->stream);
891 }
892 
893 static int ctf_writer__setup_env(struct ctf_writer *cw,
894 				 struct perf_session *session)
895 {
896 	struct perf_header *header = &session->header;
897 	struct bt_ctf_writer *writer = cw->writer;
898 
899 #define ADD(__n, __v)							\
900 do {									\
901 	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
902 		return -1;						\
903 } while (0)
904 
905 	ADD("host",    header->env.hostname);
906 	ADD("sysname", "Linux");
907 	ADD("release", header->env.os_release);
908 	ADD("version", header->env.version);
909 	ADD("machine", header->env.arch);
910 	ADD("domain", "kernel");
911 	ADD("tracer_name", "perf");
912 
913 #undef ADD
914 	return 0;
915 }
916 
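/*
 * The clock frequency is set to 1 GHz so that clock values are in
 * nanoseconds and perf's sample timestamps can be passed to
 * bt_ctf_clock_set_time() unchanged.
 */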
917 static int ctf_writer__setup_clock(struct ctf_writer *cw)
918 {
919 	struct bt_ctf_clock *clock = cw->clock;
920 
921 	bt_ctf_clock_set_description(clock, "perf clock");
922 
923 #define SET(__n, __v)				\
924 do {						\
925 	if (bt_ctf_clock_set_##__n(clock, __v))	\
926 		return -1;			\
927 } while (0)
928 
929 	SET(frequency,   1000000000);
930 	SET(offset_s,    0);
931 	SET(offset,      0);
932 	SET(precision,   10);
933 	SET(is_absolute, 0);
934 
935 #undef SET
936 	return 0;
937 }
938 
939 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
940 {
941 	struct bt_ctf_field_type *type;
942 
943 	type = bt_ctf_field_type_integer_create(size);
944 	if (!type)
945 		return NULL;
946 
947 	if (sign &&
948 	    bt_ctf_field_type_integer_set_signed(type, 1))
949 		goto err;
950 
951 	if (hex &&
952 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
953 		goto err;
954 
	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
957 	return type;
958 
959 err:
960 	bt_ctf_field_type_put(type);
961 	return NULL;
962 }
963 
964 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
965 {
966 	unsigned int i;
967 
968 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
969 		bt_ctf_field_type_put(cw->data.array[i]);
970 }
971 
972 static int ctf_writer__init_data(struct ctf_writer *cw)
973 {
974 #define CREATE_INT_TYPE(type, size, sign, hex)		\
975 do {							\
976 	(type) = create_int_type(size, sign, hex);	\
977 	if (!(type))					\
978 		goto err;				\
979 } while (0)
980 
981 	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
982 	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
983 	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
984 	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
985 	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
986 
987 	cw->data.string  = bt_ctf_field_type_string_create();
988 	if (cw->data.string)
989 		return 0;
990 
991 err:
992 	ctf_writer__cleanup_data(cw);
993 	pr_err("Failed to create data types.\n");
994 	return -1;
995 }
996 
997 static void ctf_writer__cleanup(struct ctf_writer *cw)
998 {
999 	ctf_writer__cleanup_data(cw);
1000 
1001 	bt_ctf_clock_put(cw->clock);
1002 	free_streams(cw);
1003 	bt_ctf_stream_class_put(cw->stream_class);
1004 	bt_ctf_writer_put(cw->writer);
1005 
1006 	/* and NULL all the pointers */
1007 	memset(cw, 0, sizeof(*cw));
1008 }
1009 
1010 static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1011 {
1012 	struct bt_ctf_writer		*writer;
1013 	struct bt_ctf_stream_class	*stream_class;
1014 	struct bt_ctf_clock		*clock;
1015 	struct bt_ctf_field_type	*pkt_ctx_type;
1016 	int				ret;
1017 
1018 	/* CTF writer */
1019 	writer = bt_ctf_writer_create(path);
1020 	if (!writer)
1021 		goto err;
1022 
1023 	cw->writer = writer;
1024 
1025 	/* CTF clock */
1026 	clock = bt_ctf_clock_create("perf_clock");
1027 	if (!clock) {
1028 		pr("Failed to create CTF clock.\n");
1029 		goto err_cleanup;
1030 	}
1031 
1032 	cw->clock = clock;
1033 
1034 	if (ctf_writer__setup_clock(cw)) {
1035 		pr("Failed to setup CTF clock.\n");
1036 		goto err_cleanup;
1037 	}
1038 
1039 	/* CTF stream class */
1040 	stream_class = bt_ctf_stream_class_create("perf_stream");
1041 	if (!stream_class) {
1042 		pr("Failed to create CTF stream class.\n");
1043 		goto err_cleanup;
1044 	}
1045 
1046 	cw->stream_class = stream_class;
1047 
1048 	/* CTF clock stream setup */
1049 	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1050 		pr("Failed to assign CTF clock to stream class.\n");
1051 		goto err_cleanup;
1052 	}
1053 
1054 	if (ctf_writer__init_data(cw))
1055 		goto err_cleanup;
1056 
1057 	/* Add cpu_id for packet context */
1058 	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1059 	if (!pkt_ctx_type)
1060 		goto err_cleanup;
1061 
1062 	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1063 	bt_ctf_field_type_put(pkt_ctx_type);
1064 	if (ret)
1065 		goto err_cleanup;
1066 
1067 	/* CTF clock writer setup */
1068 	if (bt_ctf_writer_add_clock(writer, clock)) {
1069 		pr("Failed to assign CTF clock to writer.\n");
1070 		goto err_cleanup;
1071 	}
1072 
1073 	return 0;
1074 
1075 err_cleanup:
1076 	ctf_writer__cleanup(cw);
1077 err:
1078 	pr_err("Failed to setup CTF writer.\n");
1079 	return -1;
1080 }
1081 
1082 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1083 {
1084 	int cpu, ret = 0;
1085 
1086 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1087 		ret = ctf_stream__flush(cw->stream[cpu]);
1088 
1089 	return ret;
1090 }
1091 
1092 static int convert__config(const char *var, const char *value, void *cb)
1093 {
1094 	struct convert *c = cb;
1095 
1096 	if (!strcmp(var, "convert.queue-size")) {
1097 		c->queue_size = perf_config_u64(var, value);
1098 		return 0;
1099 	}
1100 
1101 	return perf_default_config(var, value, cb);
1102 }
1103 
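/*
 * Entry point: read the perf.data file 'input', convert its contents
 * into a CTF trace written via babeltrace under 'path', and print a
 * short summary of the converted data.
 */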
1104 int bt_convert__perf2ctf(const char *input, const char *path, bool force)
1105 {
1106 	struct perf_session *session;
1107 	struct perf_data_file file = {
1108 		.path = input,
1109 		.mode = PERF_DATA_MODE_READ,
1110 		.force = force,
1111 	};
1112 	struct convert c = {
1113 		.tool = {
1114 			.sample          = process_sample_event,
1115 			.mmap            = perf_event__process_mmap,
1116 			.mmap2           = perf_event__process_mmap2,
1117 			.comm            = perf_event__process_comm,
1118 			.exit            = perf_event__process_exit,
1119 			.fork            = perf_event__process_fork,
1120 			.lost            = perf_event__process_lost,
1121 			.tracing_data    = perf_event__process_tracing_data,
1122 			.build_id        = perf_event__process_build_id,
1123 			.ordered_events  = true,
1124 			.ordering_requires_timestamps = true,
1125 		},
1126 	};
1127 	struct ctf_writer *cw = &c.writer;
1128 	int err = -1;
1129 
1130 	perf_config(convert__config, &c);
1131 
1132 	/* CTF writer */
1133 	if (ctf_writer__init(cw, path))
1134 		return -1;
1135 
1136 	/* perf.data session */
1137 	session = perf_session__new(&file, 0, &c.tool);
1138 	if (!session)
1139 		goto free_writer;
1140 
1141 	if (c.queue_size) {
1142 		ordered_events__set_alloc_size(&session->ordered_events,
1143 					       c.queue_size);
1144 	}
1145 
1146 	/* CTF writer env/clock setup  */
1147 	if (ctf_writer__setup_env(cw, session))
1148 		goto free_session;
1149 
1150 	/* CTF events setup */
1151 	if (setup_events(cw, session))
1152 		goto free_session;
1153 
1154 	if (setup_streams(cw, session))
1155 		goto free_session;
1156 
1157 	err = perf_session__process_events(session);
1158 	if (!err)
1159 		err = ctf_writer__flush_streams(cw);
1160 	else
1161 		pr_err("Error during conversion.\n");
1162 
1163 	fprintf(stderr,
1164 		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1165 		file.path, path);
1166 
1167 	fprintf(stderr,
1168 		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
1169 		(double) c.events_size / 1024.0 / 1024.0,
1170 		c.events_count);
1171 
1172 	perf_session__delete(session);
1173 	ctf_writer__cleanup(cw);
1174 
1175 	return err;
1176 
1177 free_session:
1178 	perf_session__delete(session);
1179 free_writer:
1180 	ctf_writer__cleanup(cw);
1181 	pr_err("Error during conversion setup.\n");
1182 	return err;
1183 }
1184