/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream *stream;
	int cpu;
	u32 count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		struct bt_ctf_field_type *array[6];
	} data;
	struct bt_ctf_event_class *comm_class;
	struct bt_ctf_event_class *exit_class;
	struct bt_ctf_event_class *fork_class;
};

struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;
	u64			events_count;
	u64			non_sample_count;

	/* Ordered events configured queue size. */
	u64			queue_size;
};

static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
		 const char *name, const char *string)
{
	struct bt_ctf_field_type *type = cw->data.string;
	struct bt_ctf_field *field;
	int ret = 0;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	ret = string_set_value(field, string);
	if (ret) {
		pr_err("failed to set value %s\n", name);
		goto err_put_field;
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret)
		pr_err("failed to set payload %s\n", name);

err_put_field:
	bt_ctf_field_put(field);
	return ret;
}

static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned long values are mostly pointers */
		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}

static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For a 64 bit value, return it as is. There is no need
		 * to fill the high bits.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill the upper part of value_int with 1s to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}

static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			strncat(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}

static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	name = fmtf->alias;
	offset = fmtf->offset;
	len = fmtf->size;
	if (flags & FIELD_IS_STRING)
		flags &= ~FIELD_IS_ARRAY;

	if (flags & FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = pevent_read_number(fmtf->event->pevent,
					     data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
	}

	if (flags & FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = pevent_read_number(
					fmtf->event->pevent,
					data + offset + i * len, len);

			if (!(flags & FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct format_field *fields,
					struct perf_sample *sample)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}

static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}

static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct perf_evsel *evsel)
{
	int cpu = 0;

	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine when to flush
 * a stream than to keep track of the number of events and
 * check it against a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)				\
	do {									\
		ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
		if (ret)							\
			return -1;						\
	} while(0)

#define __FUNC_PROCESS_NON_SAMPLE(_name, body)				\
static int process_##_name##_event(struct perf_tool *tool,		\
				   union perf_event *_event,		\
				   struct perf_sample *sample,		\
				   struct machine *machine)		\
{									\
	struct convert *c = container_of(tool, struct convert, tool);	\
	struct ctf_writer *cw = &c->writer;				\
	struct bt_ctf_event_class *event_class = cw->_name##_class;	\
	struct bt_ctf_event *event;					\
	struct ctf_stream *cs;						\
	int ret;							\
									\
	c->non_sample_count++;						\
	c->events_size += _event->header.size;				\
	event = bt_ctf_event_create(event_class);			\
	if (!event) {							\
		pr_err("Failed to create a CTF event\n");		\
		return -1;						\
	}								\
									\
	bt_ctf_clock_set_time(cw->clock, sample->time);			\
	body								\
	cs = ctf_stream(cw, 0);						\
	if (cs) {							\
		if (is_flush_needed(cs))				\
			ctf_stream__flush(cs);				\
									\
		cs->count++;						\
		bt_ctf_stream_append_event(cs->stream, event);		\
	}								\
	bt_ctf_event_put(event);					\
									\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}

static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
				(char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & FIELD_IS_STRING)
			flags &= ~FIELD_IS_ARRAY;

		if (flags & FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct perf_evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}

static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}

#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {									\
		pr2("  field '%s'\n", #n);					\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s';\n", #n);		\
			return -1;						\
		}								\
	} while(0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)			\
static int add_##_name##_event(struct ctf_writer *cw)			\
{									\
	struct bt_ctf_event_class *event_class;				\
	int ret;							\
									\
	pr("Adding "#_name" event\n");					\
	event_class = bt_ctf_event_class_create("perf_" #_name);	\
	if (!event_class)						\
		return -1;						\
	body								\
									\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {							\
		pr("Failed to add event class '"#_name"' into stream.\n");\
		return ret;						\
	}								\
									\
	cw->_name##_class = event_class;				\
	bt_ctf_event_class_put(event_class);				\
	return 0;							\
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}

static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file;
	 * if not present, fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream     = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}

int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path  = input,
		.mode  = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.namespaces      = perf_event__process_namespaces,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}