1 /*
2  * CTF writing support via babeltrace.
3  *
4  * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
5  * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 
10 #include <linux/compiler.h>
11 #include <babeltrace/ctf-writer/writer.h>
12 #include <babeltrace/ctf-writer/clock.h>
13 #include <babeltrace/ctf-writer/stream.h>
14 #include <babeltrace/ctf-writer/event.h>
15 #include <babeltrace/ctf-writer/event-types.h>
16 #include <babeltrace/ctf-writer/event-fields.h>
17 #include <babeltrace/ctf-ir/utils.h>
18 #include <babeltrace/ctf/events.h>
19 #include <traceevent/event-parse.h>
20 #include "asm/bug.h"
21 #include "data-convert-bt.h"
22 #include "session.h"
23 #include "util.h"
24 #include "debug.h"
25 #include "tool.h"
26 #include "evlist.h"
27 #include "evsel.h"
28 #include "machine.h"
29 
30 #define pr_N(n, fmt, ...) \
31 	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
32 
33 #define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
34 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)
35 
36 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
37 
38 struct evsel_priv {
39 	struct bt_ctf_event_class *event_class;
40 };
41 
42 #define MAX_CPUS	4096
43 
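/*
 * Per-CPU output stream state: 'count' is the number of samples
 * appended since the last flush (see STREAM_FLUSH_COUNT below).
 */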
44 struct ctf_stream {
45 	struct bt_ctf_stream *stream;
46 	int cpu;
47 	u32 count;
48 };
49 
50 struct ctf_writer {
51 	/* writer primitives */
52 	struct bt_ctf_writer		 *writer;
53 	struct ctf_stream		**stream;
54 	int				  stream_cnt;
55 	struct bt_ctf_stream_class	 *stream_class;
56 	struct bt_ctf_clock		 *clock;
57 
58 	/* data types */
59 	union {
60 		struct {
61 			struct bt_ctf_field_type	*s64;
62 			struct bt_ctf_field_type	*u64;
63 			struct bt_ctf_field_type	*s32;
64 			struct bt_ctf_field_type	*u32;
65 			struct bt_ctf_field_type	*string;
66 			struct bt_ctf_field_type	*u32_hex;
67 			struct bt_ctf_field_type	*u64_hex;
68 		};
		/* Must cover every field above: cleanup iterates this array. */
		struct bt_ctf_field_type *array[7];
70 	} data;
71 };
72 
73 struct convert {
74 	struct perf_tool	tool;
75 	struct ctf_writer	writer;
76 
77 	u64			events_size;
78 	u64			events_count;
79 
80 	/* Ordered events configured queue size. */
81 	u64			queue_size;
82 };
83 
84 static int value_set(struct bt_ctf_field_type *type,
85 		     struct bt_ctf_event *event,
86 		     const char *name, u64 val)
87 {
88 	struct bt_ctf_field *field;
89 	bool sign = bt_ctf_field_type_integer_get_signed(type);
90 	int ret;
91 
92 	field = bt_ctf_field_create(type);
93 	if (!field) {
94 		pr_err("failed to create a field %s\n", name);
95 		return -1;
96 	}
97 
98 	if (sign) {
99 		ret = bt_ctf_field_signed_integer_set_value(field, val);
100 		if (ret) {
101 			pr_err("failed to set field value %s\n", name);
102 			goto err;
103 		}
104 	} else {
105 		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
106 		if (ret) {
107 			pr_err("failed to set field value %s\n", name);
108 			goto err;
109 		}
110 	}
111 
112 	ret = bt_ctf_event_set_payload(event, name, field);
113 	if (ret) {
114 		pr_err("failed to set payload %s\n", name);
115 		goto err;
116 	}
117 
118 	pr2("  SET [%s = %" PRIu64 "]\n", name, val);
119 
120 err:
121 	bt_ctf_field_put(field);
122 	return ret;
123 }
124 
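/*
 * Generate typed wrappers (value_set_s32(), value_set_u64_hex(), ...)
 * that pick the matching CTF integer type from cw->data.
 */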
125 #define __FUNC_VALUE_SET(_name, _val_type)				\
126 static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
127 			     struct bt_ctf_event *event,		\
128 			     const char *name,				\
129 			     _val_type val)				\
130 {									\
131 	struct bt_ctf_field_type *type = cw->data._name;		\
132 	return value_set(type, event, name, (u64) val);			\
133 }
134 
135 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)
136 
137 FUNC_VALUE_SET(s32)
138 FUNC_VALUE_SET(u32)
139 FUNC_VALUE_SET(s64)
140 FUNC_VALUE_SET(u64)
141 __FUNC_VALUE_SET(u64_hex, u64)
142 
143 static struct bt_ctf_field_type*
144 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
145 {
146 	unsigned long flags = field->flags;
147 
148 	if (flags & FIELD_IS_STRING)
149 		return cw->data.string;
150 
151 	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned longs are mostly pointers */
153 		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
154 			return cw->data.u64_hex;
155 	}
156 
157 	if (flags & FIELD_IS_SIGNED) {
158 		if (field->size == 8)
159 			return cw->data.s64;
160 		else
161 			return cw->data.s32;
162 	}
163 
164 	if (field->size == 8)
165 		return cw->data.u64;
166 	else
167 		return cw->data.u32;
168 }
169 
170 static unsigned long long adjust_signedness(unsigned long long value_int, int size)
171 {
172 	unsigned long long value_mask;
173 
174 	/*
175 	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Set value_mask explicitly for readability.
177 	 */
178 	switch (size) {
179 	case 1:
180 		value_mask = 0x7fULL;
181 		break;
182 	case 2:
183 		value_mask = 0x7fffULL;
184 		break;
185 	case 4:
186 		value_mask = 0x7fffffffULL;
187 		break;
188 	case 8:
		/*
		 * A 64-bit value needs no sign extension, so return it
		 * as is.
		 */
		/* Fall through */
	default:
		/* BUG: unexpected field size */
196 		return value_int;
197 	}
198 
199 	/* If it is a positive value, don't adjust. */
200 	if ((value_int & (~0ULL - value_mask)) == 0)
201 		return value_int;
202 
	/* Fill the upper bits of value_int with 1s to make it a negative long long. */
204 	return (value_int & value_mask) | ~value_mask;
205 }
206 
207 static int add_tracepoint_field_value(struct ctf_writer *cw,
208 				      struct bt_ctf_event_class *event_class,
209 				      struct bt_ctf_event *event,
210 				      struct perf_sample *sample,
211 				      struct format_field *fmtf)
212 {
213 	struct bt_ctf_field_type *type;
214 	struct bt_ctf_field *array_field;
215 	struct bt_ctf_field *field;
216 	const char *name = fmtf->name;
217 	void *data = sample->raw_data;
218 	unsigned long flags = fmtf->flags;
219 	unsigned int n_items;
220 	unsigned int i;
221 	unsigned int offset;
222 	unsigned int len;
223 	int ret;
224 
225 	name = fmtf->alias;
226 	offset = fmtf->offset;
227 	len = fmtf->size;
228 	if (flags & FIELD_IS_STRING)
229 		flags &= ~FIELD_IS_ARRAY;
230 
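	/*
	 * Dynamic (__data_loc) fields store their real location packed
	 * into a 32-bit word: length in the high 16 bits, offset into
	 * the raw data in the low 16 bits.
	 */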
231 	if (flags & FIELD_IS_DYNAMIC) {
232 		unsigned long long tmp_val;
233 
234 		tmp_val = pevent_read_number(fmtf->event->pevent,
235 				data + offset, len);
236 		offset = tmp_val;
237 		len = offset >> 16;
238 		offset &= 0xffff;
239 	}
240 
241 	if (flags & FIELD_IS_ARRAY) {
242 
243 		type = bt_ctf_event_class_get_field_by_name(
244 				event_class, name);
245 		array_field = bt_ctf_field_create(type);
246 		bt_ctf_field_type_put(type);
247 		if (!array_field) {
248 			pr_err("Failed to create array type %s\n", name);
249 			return -1;
250 		}
251 
252 		len = fmtf->size / fmtf->arraylen;
253 		n_items = fmtf->arraylen;
254 	} else {
255 		n_items = 1;
256 		array_field = NULL;
257 	}
258 
259 	type = get_tracepoint_field_type(cw, fmtf);
260 
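	/*
	 * Write each element: array members are collected in array_field
	 * and attached to the event once after the loop, scalar fields
	 * are attached directly inside the loop.
	 */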
261 	for (i = 0; i < n_items; i++) {
262 		if (flags & FIELD_IS_ARRAY)
263 			field = bt_ctf_field_array_get_field(array_field, i);
264 		else
265 			field = bt_ctf_field_create(type);
266 
267 		if (!field) {
268 			pr_err("failed to create a field %s\n", name);
269 			return -1;
270 		}
271 
272 		if (flags & FIELD_IS_STRING)
273 			ret = bt_ctf_field_string_set_value(field,
274 					data + offset + i * len);
275 		else {
276 			unsigned long long value_int;
277 
278 			value_int = pevent_read_number(
279 					fmtf->event->pevent,
280 					data + offset + i * len, len);
281 
282 			if (!(flags & FIELD_IS_SIGNED))
283 				ret = bt_ctf_field_unsigned_integer_set_value(
284 						field, value_int);
285 			else
286 				ret = bt_ctf_field_signed_integer_set_value(
287 						field, adjust_signedness(value_int, len));
288 		}
289 
290 		if (ret) {
			pr_err("failed to set field value %s\n", name);
292 			goto err_put_field;
293 		}
294 		if (!(flags & FIELD_IS_ARRAY)) {
295 			ret = bt_ctf_event_set_payload(event, name, field);
296 			if (ret) {
297 				pr_err("failed to set payload %s\n", name);
298 				goto err_put_field;
299 			}
300 		}
301 		bt_ctf_field_put(field);
302 	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		bt_ctf_field_put(array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
	}
311 	return 0;
312 
313 err_put_field:
314 	bt_ctf_field_put(field);
315 	return -1;
316 }
317 
318 static int add_tracepoint_fields_values(struct ctf_writer *cw,
319 					struct bt_ctf_event_class *event_class,
320 					struct bt_ctf_event *event,
321 					struct format_field *fields,
322 					struct perf_sample *sample)
323 {
324 	struct format_field *field;
325 	int ret;
326 
327 	for (field = fields; field; field = field->next) {
328 		ret = add_tracepoint_field_value(cw, event_class, event, sample,
329 				field);
330 		if (ret)
331 			return -1;
332 	}
333 	return 0;
334 }
335 
336 static int add_tracepoint_values(struct ctf_writer *cw,
337 				 struct bt_ctf_event_class *event_class,
338 				 struct bt_ctf_event *event,
339 				 struct perf_evsel *evsel,
340 				 struct perf_sample *sample)
341 {
342 	struct format_field *common_fields = evsel->tp_format->format.common_fields;
343 	struct format_field *fields        = evsel->tp_format->format.fields;
344 	int ret;
345 
346 	ret = add_tracepoint_fields_values(cw, event_class, event,
347 					   common_fields, sample);
348 	if (!ret)
349 		ret = add_tracepoint_fields_values(cw, event_class, event,
350 						   fields, sample);
351 
352 	return ret;
353 }
354 
355 static int
356 add_bpf_output_values(struct bt_ctf_event_class *event_class,
357 		      struct bt_ctf_event *event,
358 		      struct perf_sample *sample)
359 {
360 	struct bt_ctf_field_type *len_type, *seq_type;
361 	struct bt_ctf_field *len_field, *seq_field;
362 	unsigned int raw_size = sample->raw_size;
363 	unsigned int nr_elements = raw_size / sizeof(u32);
364 	unsigned int i;
365 	int ret;
366 
367 	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));
370 
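	/*
	 * The payload mirrors add_bpf_output_types(): 'raw_len' holds the
	 * number of u32 words and also serves as the length field of the
	 * 'raw_data' sequence.
	 */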
371 	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
372 	len_field = bt_ctf_field_create(len_type);
373 	if (!len_field) {
374 		pr_err("failed to create 'raw_len' for bpf output event\n");
375 		ret = -1;
376 		goto put_len_type;
377 	}
378 
379 	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
380 	if (ret) {
381 		pr_err("failed to set field value for raw_len\n");
382 		goto put_len_field;
383 	}
384 	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
385 	if (ret) {
386 		pr_err("failed to set payload to raw_len\n");
387 		goto put_len_field;
388 	}
389 
390 	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
391 	seq_field = bt_ctf_field_create(seq_type);
392 	if (!seq_field) {
393 		pr_err("failed to create 'raw_data' for bpf output event\n");
394 		ret = -1;
395 		goto put_seq_type;
396 	}
397 
398 	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
399 	if (ret) {
400 		pr_err("failed to set length of 'raw_data'\n");
401 		goto put_seq_field;
402 	}
403 
404 	for (i = 0; i < nr_elements; i++) {
405 		struct bt_ctf_field *elem_field =
406 			bt_ctf_field_sequence_get_field(seq_field, i);
407 
408 		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
409 				((u32 *)(sample->raw_data))[i]);
410 
411 		bt_ctf_field_put(elem_field);
412 		if (ret) {
413 			pr_err("failed to set raw_data[%d]\n", i);
414 			goto put_seq_field;
415 		}
416 	}
417 
418 	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
419 	if (ret)
420 		pr_err("failed to set payload for raw_data\n");
421 
422 put_seq_field:
423 	bt_ctf_field_put(seq_field);
424 put_seq_type:
425 	bt_ctf_field_type_put(seq_type);
426 put_len_field:
427 	bt_ctf_field_put(len_field);
428 put_len_type:
429 	bt_ctf_field_type_put(len_type);
430 	return ret;
431 }
432 
433 static int add_generic_values(struct ctf_writer *cw,
434 			      struct bt_ctf_event *event,
435 			      struct perf_evsel *evsel,
436 			      struct perf_sample *sample)
437 {
438 	u64 type = evsel->attr.sample_type;
439 	int ret;
440 
441 	/*
442 	 * missing:
443 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
444 	 *                              ctf event header
445 	 *   PERF_SAMPLE_READ         - TODO
446 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
447 	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
448 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
449 	 *   PERF_SAMPLE_REGS_USER    - TODO
450 	 *   PERF_SAMPLE_STACK_USER   - TODO
451 	 */
452 
453 	if (type & PERF_SAMPLE_IP) {
454 		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
455 		if (ret)
456 			return -1;
457 	}
458 
459 	if (type & PERF_SAMPLE_TID) {
460 		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
461 		if (ret)
462 			return -1;
463 
464 		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
465 		if (ret)
466 			return -1;
467 	}
468 
469 	if ((type & PERF_SAMPLE_ID) ||
470 	    (type & PERF_SAMPLE_IDENTIFIER)) {
471 		ret = value_set_u64(cw, event, "perf_id", sample->id);
472 		if (ret)
473 			return -1;
474 	}
475 
476 	if (type & PERF_SAMPLE_STREAM_ID) {
477 		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
478 		if (ret)
479 			return -1;
480 	}
481 
482 	if (type & PERF_SAMPLE_PERIOD) {
483 		ret = value_set_u64(cw, event, "perf_period", sample->period);
484 		if (ret)
485 			return -1;
486 	}
487 
488 	if (type & PERF_SAMPLE_WEIGHT) {
489 		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
490 		if (ret)
491 			return -1;
492 	}
493 
494 	if (type & PERF_SAMPLE_DATA_SRC) {
495 		ret = value_set_u64(cw, event, "perf_data_src",
496 				sample->data_src);
497 		if (ret)
498 			return -1;
499 	}
500 
501 	if (type & PERF_SAMPLE_TRANSACTION) {
502 		ret = value_set_u64(cw, event, "perf_transaction",
503 				sample->transaction);
504 		if (ret)
505 			return -1;
506 	}
507 
508 	return 0;
509 }
510 
511 static int ctf_stream__flush(struct ctf_stream *cs)
512 {
513 	int err = 0;
514 
515 	if (cs) {
516 		err = bt_ctf_stream_flush(cs->stream);
517 		if (err)
518 			pr_err("CTF stream %d flush failed\n", cs->cpu);
519 
520 		pr("Flush stream for cpu %d (%u samples)\n",
521 		   cs->cpu, cs->count);
522 
523 		cs->count = 0;
524 	}
525 
526 	return err;
527 }
528 
529 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
530 {
531 	struct ctf_stream *cs;
532 	struct bt_ctf_field *pkt_ctx   = NULL;
533 	struct bt_ctf_field *cpu_field = NULL;
534 	struct bt_ctf_stream *stream   = NULL;
535 	int ret;
536 
537 	cs = zalloc(sizeof(*cs));
538 	if (!cs) {
539 		pr_err("Failed to allocate ctf stream\n");
540 		return NULL;
541 	}
542 
543 	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
544 	if (!stream) {
545 		pr_err("Failed to create CTF stream\n");
546 		goto out;
547 	}
548 
549 	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
550 	if (!pkt_ctx) {
551 		pr_err("Failed to obtain packet context\n");
552 		goto out;
553 	}
554 
555 	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
556 	bt_ctf_field_put(pkt_ctx);
557 	if (!cpu_field) {
558 		pr_err("Failed to obtain cpu field\n");
559 		goto out;
560 	}
561 
562 	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
563 	if (ret) {
564 		pr_err("Failed to update CPU number\n");
565 		goto out;
566 	}
567 
568 	bt_ctf_field_put(cpu_field);
569 
570 	cs->cpu    = cpu;
571 	cs->stream = stream;
572 	return cs;
573 
574 out:
575 	if (cpu_field)
576 		bt_ctf_field_put(cpu_field);
577 	if (stream)
578 		bt_ctf_stream_put(stream);
579 
580 	free(cs);
581 	return NULL;
582 }
583 
584 static void ctf_stream__delete(struct ctf_stream *cs)
585 {
586 	if (cs) {
587 		bt_ctf_stream_put(cs->stream);
588 		free(cs);
589 	}
590 }
591 
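/* Return the stream for 'cpu', creating it lazily on first use. */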
592 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
593 {
594 	struct ctf_stream *cs = cw->stream[cpu];
595 
596 	if (!cs) {
597 		cs = ctf_stream__create(cw, cpu);
598 		cw->stream[cpu] = cs;
599 	}
600 
601 	return cs;
602 }
603 
604 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
605 			  struct perf_evsel *evsel)
606 {
607 	int cpu = 0;
608 
609 	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
610 		cpu = sample->cpu;
611 
	if (cpu >= cw->stream_cnt) {
613 		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
614 			cpu, cw->stream_cnt);
615 		cpu = 0;
616 	}
617 
618 	return cpu;
619 }
620 
621 #define STREAM_FLUSH_COUNT 100000
622 
623 /*
624  * Currently we have no other way to determine the
625  * time for the stream flush other than keep track
626  * of the number of events and check it against
627  * threshold.
628  */
629 static bool is_flush_needed(struct ctf_stream *cs)
630 {
631 	return cs->count >= STREAM_FLUSH_COUNT;
632 }
633 
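/*
 * Convert one perf sample into a CTF event and append it to the
 * per-CPU stream, flushing the stream once it grows past the
 * threshold above.
 */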
634 static int process_sample_event(struct perf_tool *tool,
635 				union perf_event *_event,
636 				struct perf_sample *sample,
637 				struct perf_evsel *evsel,
638 				struct machine *machine __maybe_unused)
639 {
640 	struct convert *c = container_of(tool, struct convert, tool);
641 	struct evsel_priv *priv = evsel->priv;
642 	struct ctf_writer *cw = &c->writer;
643 	struct ctf_stream *cs;
644 	struct bt_ctf_event_class *event_class;
645 	struct bt_ctf_event *event;
646 	int ret;
647 
	if (WARN_ONCE(!priv, "Failed to set up all events.\n"))
649 		return 0;
650 
651 	event_class = priv->event_class;
652 
653 	/* update stats */
654 	c->events_count++;
655 	c->events_size += _event->header.size;
656 
657 	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
658 
659 	event = bt_ctf_event_create(event_class);
660 	if (!event) {
		pr_err("Failed to create a CTF event\n");
662 		return -1;
663 	}
664 
665 	bt_ctf_clock_set_time(cw->clock, sample->time);
666 
	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		goto err_put_event;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			goto err_put_event;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			goto err_put_event;
	}
683 
684 	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
685 	if (cs) {
686 		if (is_flush_needed(cs))
687 			ctf_stream__flush(cs);
688 
689 		cs->count++;
690 		bt_ctf_stream_append_event(cs->stream, event);
691 	}
692 
	bt_ctf_event_put(event);
	return cs ? 0 : -1;

err_put_event:
	/* Drop our reference so failed samples do not leak the event. */
	bt_ctf_event_put(event);
	return -1;
695 }
696 
697 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
698 static char *change_name(char *name, char *orig_name, int dup)
699 {
700 	char *new_name = NULL;
701 	size_t len;
702 
703 	if (!name)
704 		name = orig_name;
705 
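	/* The "_dupl_X" length reserved below only fits a single digit. */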
706 	if (dup >= 10)
707 		goto out;
708 	/*
	 * Add a '_' prefix to potential keywords.  According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updates may require us to use '$'.
712 	 */
713 	if (dup < 0)
714 		len = strlen(name) + sizeof("_");
715 	else
716 		len = strlen(orig_name) + sizeof("_dupl_X");
717 
718 	new_name = malloc(len);
719 	if (!new_name)
720 		goto out;
721 
722 	if (dup < 0)
723 		snprintf(new_name, len, "_%s", name);
724 	else
725 		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
726 
727 out:
728 	if (name != orig_name)
729 		free(name);
730 	return new_name;
731 }
732 
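/*
 * Add a tracepoint field to the event class, rewriting the field name
 * when it clashes with a CTF keyword, is an invalid identifier, or
 * duplicates an already-added field, and caching the result in
 * field->alias.
 */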
733 static int event_class_add_field(struct bt_ctf_event_class *event_class,
734 		struct bt_ctf_field_type *type,
735 		struct format_field *field)
736 {
737 	struct bt_ctf_field_type *t = NULL;
738 	char *name;
739 	int dup = 1;
740 	int ret;
741 
742 	/* alias was already assigned */
743 	if (field->alias != field->name)
744 		return bt_ctf_event_class_add_field(event_class, type,
745 				(char *)field->alias);
746 
747 	name = field->name;
748 
	/* If 'name' is a keyword, add a prefix. */
750 	if (bt_ctf_validate_identifier(name))
751 		name = change_name(name, field->name, -1);
752 
753 	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
755 		return -1;
756 	}
757 	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
758 		bt_ctf_field_type_put(t);
759 		name = change_name(name, field->name, dup++);
760 		if (!name) {
761 			pr_err("Failed to create dup name for '%s'\n", field->name);
762 			return -1;
763 		}
764 	}
765 
766 	ret = bt_ctf_event_class_add_field(event_class, type, name);
767 	if (!ret)
768 		field->alias = name;
769 
770 	return ret;
771 }
772 
773 static int add_tracepoint_fields_types(struct ctf_writer *cw,
774 				       struct format_field *fields,
775 				       struct bt_ctf_event_class *event_class)
776 {
777 	struct format_field *field;
778 	int ret;
779 
780 	for (field = fields; field; field = field->next) {
781 		struct bt_ctf_field_type *type;
782 		unsigned long flags = field->flags;
783 
784 		pr2("  field '%s'\n", field->name);
785 
786 		type = get_tracepoint_field_type(cw, field);
787 		if (!type)
788 			return -1;
789 
790 		/*
791 		 * A string is an array of chars. For this we use the string
792 		 * type and don't care that it is an array. What we don't
793 		 * support is an array of strings.
794 		 */
795 		if (flags & FIELD_IS_STRING)
796 			flags &= ~FIELD_IS_ARRAY;
797 
798 		if (flags & FIELD_IS_ARRAY)
799 			type = bt_ctf_field_type_array_create(type, field->arraylen);
800 
801 		ret = event_class_add_field(event_class, type, field);
802 
803 		if (flags & FIELD_IS_ARRAY)
804 			bt_ctf_field_type_put(type);
805 
806 		if (ret) {
807 			pr_err("Failed to add field '%s': %d\n",
808 					field->name, ret);
809 			return -1;
810 		}
811 	}
812 
813 	return 0;
814 }
815 
816 static int add_tracepoint_types(struct ctf_writer *cw,
817 				struct perf_evsel *evsel,
818 				struct bt_ctf_event_class *class)
819 {
820 	struct format_field *common_fields = evsel->tp_format->format.common_fields;
821 	struct format_field *fields        = evsel->tp_format->format.fields;
822 	int ret;
823 
824 	ret = add_tracepoint_fields_types(cw, common_fields, class);
825 	if (!ret)
826 		ret = add_tracepoint_fields_types(cw, fields, class);
827 
828 	return ret;
829 }
830 
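/*
 * BPF output events carry opaque raw data; expose it as a u32 'raw_len'
 * plus a 'raw_data' sequence of hex-formatted u32 words whose length is
 * given by 'raw_len'.
 */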
831 static int add_bpf_output_types(struct ctf_writer *cw,
832 				struct bt_ctf_event_class *class)
833 {
834 	struct bt_ctf_field_type *len_type = cw->data.u32;
835 	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
836 	struct bt_ctf_field_type *seq_type;
837 	int ret;
838 
839 	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
840 	if (ret)
841 		return ret;
842 
843 	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
844 	if (!seq_type)
845 		return -1;
846 
847 	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
848 }
849 
850 static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
851 			     struct bt_ctf_event_class *event_class)
852 {
853 	u64 type = evsel->attr.sample_type;
854 
855 	/*
856 	 * missing:
857 	 *   PERF_SAMPLE_TIME         - not needed as we have it in
858 	 *                              ctf event header
859 	 *   PERF_SAMPLE_READ         - TODO
860 	 *   PERF_SAMPLE_CALLCHAIN    - TODO
861 	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
862 	 *                              are handled separately
863 	 *   PERF_SAMPLE_BRANCH_STACK - TODO
864 	 *   PERF_SAMPLE_REGS_USER    - TODO
865 	 *   PERF_SAMPLE_STACK_USER   - TODO
866 	 */
867 
868 #define ADD_FIELD(cl, t, n)						\
869 	do {								\
870 		pr2("  field '%s'\n", n);				\
871 		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
873 			return -1;					\
874 		}							\
875 	} while (0)
876 
877 	if (type & PERF_SAMPLE_IP)
878 		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
879 
880 	if (type & PERF_SAMPLE_TID) {
881 		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
882 		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
883 	}
884 
885 	if ((type & PERF_SAMPLE_ID) ||
886 	    (type & PERF_SAMPLE_IDENTIFIER))
887 		ADD_FIELD(event_class, cw->data.u64, "perf_id");
888 
889 	if (type & PERF_SAMPLE_STREAM_ID)
890 		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
891 
892 	if (type & PERF_SAMPLE_PERIOD)
893 		ADD_FIELD(event_class, cw->data.u64, "perf_period");
894 
895 	if (type & PERF_SAMPLE_WEIGHT)
896 		ADD_FIELD(event_class, cw->data.u64, "perf_weight");
897 
898 	if (type & PERF_SAMPLE_DATA_SRC)
899 		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
900 
901 	if (type & PERF_SAMPLE_TRANSACTION)
902 		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
903 
904 #undef ADD_FIELD
905 	return 0;
906 }
907 
908 static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
909 {
910 	struct bt_ctf_event_class *event_class;
911 	struct evsel_priv *priv;
912 	const char *name = perf_evsel__name(evsel);
913 	int ret;
914 
915 	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
916 
917 	event_class = bt_ctf_event_class_create(name);
918 	if (!event_class)
919 		return -1;
920 
921 	ret = add_generic_types(cw, evsel, event_class);
922 	if (ret)
923 		goto err;
924 
925 	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
926 		ret = add_tracepoint_types(cw, evsel, event_class);
927 		if (ret)
928 			goto err;
929 	}
930 
931 	if (perf_evsel__is_bpf_output(evsel)) {
932 		ret = add_bpf_output_types(cw, event_class);
933 		if (ret)
934 			goto err;
935 	}
936 
937 	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
938 	if (ret) {
939 		pr("Failed to add event class into stream.\n");
940 		goto err;
941 	}
942 
943 	priv = malloc(sizeof(*priv));
944 	if (!priv)
945 		goto err;
946 
947 	priv->event_class = event_class;
948 	evsel->priv       = priv;
949 	return 0;
950 
951 err:
952 	bt_ctf_event_class_put(event_class);
953 	pr_err("Failed to add event '%s'.\n", name);
954 	return -1;
955 }
956 
957 static int setup_events(struct ctf_writer *cw, struct perf_session *session)
958 {
959 	struct perf_evlist *evlist = session->evlist;
960 	struct perf_evsel *evsel;
961 	int ret;
962 
963 	evlist__for_each(evlist, evsel) {
964 		ret = add_event(cw, evsel);
965 		if (ret)
966 			return ret;
967 	}
968 	return 0;
969 }
970 
971 static void cleanup_events(struct perf_session *session)
972 {
973 	struct perf_evlist *evlist = session->evlist;
974 	struct perf_evsel *evsel;
975 
976 	evlist__for_each(evlist, evsel) {
977 		struct evsel_priv *priv;
978 
979 		priv = evsel->priv;
980 		bt_ctf_event_class_put(priv->event_class);
981 		zfree(&evsel->priv);
982 	}
983 
984 	perf_evlist__delete(evlist);
985 	session->evlist = NULL;
986 }
987 
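/*
 * Allocate one stream slot per possible CPU; the actual CTF streams
 * are created on demand in ctf_stream().
 */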
988 static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
989 {
990 	struct ctf_stream **stream;
991 	struct perf_header *ph = &session->header;
992 	int ncpus;
993 
	/*
	 * Try to get the number of CPUs used in the data file;
	 * if it is not present, fall back to MAX_CPUS.
	 */
998 	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
999 
1000 	stream = zalloc(sizeof(*stream) * ncpus);
1001 	if (!stream) {
1002 		pr_err("Failed to allocate streams.\n");
1003 		return -ENOMEM;
1004 	}
1005 
1006 	cw->stream     = stream;
1007 	cw->stream_cnt = ncpus;
1008 	return 0;
1009 }
1010 
1011 static void free_streams(struct ctf_writer *cw)
1012 {
1013 	int cpu;
1014 
1015 	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
1016 		ctf_stream__delete(cw->stream[cpu]);
1017 
1018 	free(cw->stream);
1019 }
1020 
1021 static int ctf_writer__setup_env(struct ctf_writer *cw,
1022 				 struct perf_session *session)
1023 {
1024 	struct perf_header *header = &session->header;
1025 	struct bt_ctf_writer *writer = cw->writer;
1026 
1027 #define ADD(__n, __v)							\
1028 do {									\
1029 	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
1030 		return -1;						\
1031 } while (0)
1032 
1033 	ADD("host",    header->env.hostname);
1034 	ADD("sysname", "Linux");
1035 	ADD("release", header->env.os_release);
1036 	ADD("version", header->env.version);
1037 	ADD("machine", header->env.arch);
1038 	ADD("domain", "kernel");
1039 	ADD("tracer_name", "perf");
1040 
1041 #undef ADD
1042 	return 0;
1043 }
1044 
1045 static int ctf_writer__setup_clock(struct ctf_writer *cw)
1046 {
1047 	struct bt_ctf_clock *clock = cw->clock;
1048 
1049 	bt_ctf_clock_set_description(clock, "perf clock");
1050 
1051 #define SET(__n, __v)				\
1052 do {						\
1053 	if (bt_ctf_clock_set_##__n(clock, __v))	\
1054 		return -1;			\
1055 } while (0)
1056 
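	/* perf timestamps are in nanoseconds, hence the 1 GHz frequency. */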
1057 	SET(frequency,   1000000000);
1058 	SET(offset_s,    0);
1059 	SET(offset,      0);
1060 	SET(precision,   10);
1061 	SET(is_absolute, 0);
1062 
1063 #undef SET
1064 	return 0;
1065 }
1066 
1067 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
1068 {
1069 	struct bt_ctf_field_type *type;
1070 
1071 	type = bt_ctf_field_type_integer_create(size);
1072 	if (!type)
1073 		return NULL;
1074 
1075 	if (sign &&
1076 	    bt_ctf_field_type_integer_set_signed(type, 1))
1077 		goto err;
1078 
1079 	if (hex &&
1080 	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
1081 		goto err;
1082 
1083 #if __BYTE_ORDER == __BIG_ENDIAN
1084 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
1085 #else
1086 	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
1087 #endif
1088 
	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
1091 	return type;
1092 
1093 err:
1094 	bt_ctf_field_type_put(type);
1095 	return NULL;
1096 }
1097 
1098 static void ctf_writer__cleanup_data(struct ctf_writer *cw)
1099 {
1100 	unsigned int i;
1101 
1102 	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
1103 		bt_ctf_field_type_put(cw->data.array[i]);
1104 }
1105 
1106 static int ctf_writer__init_data(struct ctf_writer *cw)
1107 {
1108 #define CREATE_INT_TYPE(type, size, sign, hex)		\
1109 do {							\
1110 	(type) = create_int_type(size, sign, hex);	\
1111 	if (!(type))					\
1112 		goto err;				\
1113 } while (0)
1114 
1115 	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
1116 	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
1117 	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
1118 	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
1119 	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
1120 	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
1121 
1122 	cw->data.string  = bt_ctf_field_type_string_create();
1123 	if (cw->data.string)
1124 		return 0;
1125 
1126 err:
1127 	ctf_writer__cleanup_data(cw);
1128 	pr_err("Failed to create data types.\n");
1129 	return -1;
1130 }
1131 
1132 static void ctf_writer__cleanup(struct ctf_writer *cw)
1133 {
1134 	ctf_writer__cleanup_data(cw);
1135 
1136 	bt_ctf_clock_put(cw->clock);
1137 	free_streams(cw);
1138 	bt_ctf_stream_class_put(cw->stream_class);
1139 	bt_ctf_writer_put(cw->writer);
1140 
1141 	/* and NULL all the pointers */
1142 	memset(cw, 0, sizeof(*cw));
1143 }
1144 
1145 static int ctf_writer__init(struct ctf_writer *cw, const char *path)
1146 {
1147 	struct bt_ctf_writer		*writer;
1148 	struct bt_ctf_stream_class	*stream_class;
1149 	struct bt_ctf_clock		*clock;
1150 	struct bt_ctf_field_type	*pkt_ctx_type;
1151 	int				ret;
1152 
1153 	/* CTF writer */
1154 	writer = bt_ctf_writer_create(path);
1155 	if (!writer)
1156 		goto err;
1157 
1158 	cw->writer = writer;
1159 
1160 	/* CTF clock */
1161 	clock = bt_ctf_clock_create("perf_clock");
1162 	if (!clock) {
1163 		pr("Failed to create CTF clock.\n");
1164 		goto err_cleanup;
1165 	}
1166 
1167 	cw->clock = clock;
1168 
1169 	if (ctf_writer__setup_clock(cw)) {
1170 		pr("Failed to setup CTF clock.\n");
1171 		goto err_cleanup;
1172 	}
1173 
1174 	/* CTF stream class */
1175 	stream_class = bt_ctf_stream_class_create("perf_stream");
1176 	if (!stream_class) {
1177 		pr("Failed to create CTF stream class.\n");
1178 		goto err_cleanup;
1179 	}
1180 
1181 	cw->stream_class = stream_class;
1182 
1183 	/* CTF clock stream setup */
1184 	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
1185 		pr("Failed to assign CTF clock to stream class.\n");
1186 		goto err_cleanup;
1187 	}
1188 
1189 	if (ctf_writer__init_data(cw))
1190 		goto err_cleanup;
1191 
1192 	/* Add cpu_id for packet context */
1193 	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
1194 	if (!pkt_ctx_type)
1195 		goto err_cleanup;
1196 
1197 	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
1198 	bt_ctf_field_type_put(pkt_ctx_type);
1199 	if (ret)
1200 		goto err_cleanup;
1201 
1202 	/* CTF clock writer setup */
1203 	if (bt_ctf_writer_add_clock(writer, clock)) {
1204 		pr("Failed to assign CTF clock to writer.\n");
1205 		goto err_cleanup;
1206 	}
1207 
1208 	return 0;
1209 
1210 err_cleanup:
1211 	ctf_writer__cleanup(cw);
1212 err:
1213 	pr_err("Failed to setup CTF writer.\n");
1214 	return -1;
1215 }
1216 
1217 static int ctf_writer__flush_streams(struct ctf_writer *cw)
1218 {
1219 	int cpu, ret = 0;
1220 
1221 	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
1222 		ret = ctf_stream__flush(cw->stream[cpu]);
1223 
1224 	return ret;
1225 }
1226 
1227 static int convert__config(const char *var, const char *value, void *cb)
1228 {
1229 	struct convert *c = cb;
1230 
1231 	if (!strcmp(var, "convert.queue-size")) {
1232 		c->queue_size = perf_config_u64(var, value);
1233 		return 0;
1234 	}
1235 
1236 	return 0;
1237 }
1238 
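/*
 * Convert a perf.data file into a CTF trace: set up the writer, replay
 * the recorded events through the tool callbacks (samples are handled
 * by process_sample_event()), then flush all per-CPU streams.
 */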
1239 int bt_convert__perf2ctf(const char *input, const char *path, bool force)
1240 {
1241 	struct perf_session *session;
1242 	struct perf_data_file file = {
1243 		.path = input,
1244 		.mode = PERF_DATA_MODE_READ,
1245 		.force = force,
1246 	};
1247 	struct convert c = {
1248 		.tool = {
1249 			.sample          = process_sample_event,
1250 			.mmap            = perf_event__process_mmap,
1251 			.mmap2           = perf_event__process_mmap2,
1252 			.comm            = perf_event__process_comm,
1253 			.exit            = perf_event__process_exit,
1254 			.fork            = perf_event__process_fork,
1255 			.lost            = perf_event__process_lost,
1256 			.tracing_data    = perf_event__process_tracing_data,
1257 			.build_id        = perf_event__process_build_id,
1258 			.ordered_events  = true,
1259 			.ordering_requires_timestamps = true,
1260 		},
1261 	};
1262 	struct ctf_writer *cw = &c.writer;
1263 	int err = -1;
1264 
1265 	perf_config(convert__config, &c);
1266 
1267 	/* CTF writer */
1268 	if (ctf_writer__init(cw, path))
1269 		return -1;
1270 
1271 	/* perf.data session */
1272 	session = perf_session__new(&file, 0, &c.tool);
1273 	if (!session)
1274 		goto free_writer;
1275 
1276 	if (c.queue_size) {
1277 		ordered_events__set_alloc_size(&session->ordered_events,
1278 					       c.queue_size);
1279 	}
1280 
1281 	/* CTF writer env/clock setup  */
1282 	if (ctf_writer__setup_env(cw, session))
1283 		goto free_session;
1284 
1285 	/* CTF events setup */
1286 	if (setup_events(cw, session))
1287 		goto free_session;
1288 
1289 	if (setup_streams(cw, session))
1290 		goto free_session;
1291 
1292 	err = perf_session__process_events(session);
1293 	if (!err)
1294 		err = ctf_writer__flush_streams(cw);
1295 	else
1296 		pr_err("Error during conversion.\n");
1297 
1298 	fprintf(stderr,
1299 		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
1300 		file.path, path);
1301 
1302 	fprintf(stderr,
1303 		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
1304 		(double) c.events_size / 1024.0 / 1024.0,
1305 		c.events_count);
1306 
1307 	cleanup_events(session);
1308 	perf_session__delete(session);
1309 	ctf_writer__cleanup(cw);
1310 
1311 	return err;
1312 
1313 free_session:
1314 	perf_session__delete(session);
1315 free_writer:
1316 	ctf_writer__cleanup(cw);
1317 	pr_err("Error during conversion setup.\n");
1318 	return err;
1319 }
1320