/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			 cpu;
	u32			 count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/* must cover all of the named members above */
		struct bt_ctf_field_type *array[7];
	} data;
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
};

struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;
	u64			events_count;
	u64			non_sample_count;

	/* Ordered events configured queue size. */
	u64			queue_size;
};
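
/*
 * Overview (summary of the code below): the perf.data file is read
 * through a perf_session; setup_events() creates one CTF event class per
 * evsel, process_sample_event() turns each sample into a CTF event, and
 * the events are appended to per-CPU CTF streams that are flushed every
 * STREAM_FLUSH_COUNT events and once more at the end of the conversion.
 */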

static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2(" SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
		 const char *name, const char *string)
{
	struct bt_ctf_field_type *type = cw->data.string;
	struct bt_ctf_field *field;
	int ret = 0;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	ret = string_set_value(field, string);
	if (ret) {
		pr_err("failed to set value %s\n", name);
		goto err_put_field;
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret)
		pr_err("failed to set payload %s\n", name);

err_put_field:
	bt_ctf_field_put(field);
	return ret;
}

static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & FIELD_IS_SIGNED)) {
		/* unsigned long are mostly pointers */
		if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}
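
/*
 * adjust_signedness() below sign-extends a value read from the raw
 * tracepoint data to 64 bits. Worked example (illustrative): a 1-byte
 * field holding 0xfe represents -2, so adjust_signedness(0xfe, 1)
 * returns 0xfffffffffffffffe, while adjust_signedness(0x7f, 1) returns
 * 0x7f unchanged because the sign bit is clear.
 */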

static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For a 64 bit value, return it as is. There is no
		 * need to fill the high bits.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill the upper part of value_int with 1s to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}

static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			strncat(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
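
/*
 * Example for string_set_value() above (illustrative): a raw string
 * containing the unprintable byte 0x01 after "comm" is stored as the
 * printable text "comm\x01" (a literal backslash-x-0-1), i.e. every
 * unprintable byte is replaced by a four-character "\xNN" escape so the
 * written CTF string stays printable.
 */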

static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	name = fmtf->alias;
	offset = fmtf->offset;
	len = fmtf->size;
	if (flags & FIELD_IS_STRING)
		flags &= ~FIELD_IS_ARRAY;

	if (flags & FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = pevent_read_number(fmtf->event->pevent,
					     data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
	}

	if (flags & FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = pevent_read_number(
					fmtf->event->pevent,
					data + offset + i * len, len);

			if (!(flags & FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct format_field *fields,
					struct perf_sample *sample)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}
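
/*
 * add_bpf_output_values() below stores the raw BPF output buffer as a
 * sequence of u32 words: "raw_len" holds the number of u32 elements
 * (not bytes), and trailing bytes that do not fill a whole u32 are
 * dropped with a warning.
 */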

static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}
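
/*
 * Each CPU gets its own CTF stream; ctf_stream__create() below records
 * the CPU number in the "cpu_id" packet context field that
 * ctf_writer__init() adds to the stream class.
 */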

static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct perf_evsel *evsel)
{
	int cpu = 0;

	if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the time for the stream
 * flush than to keep track of the number of events and check it against
 * a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}
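
/*
 * process_sample_event() turns one perf sample into one CTF event: it
 * creates an event from the evsel's event class, sets the clock to the
 * sample time, fills the generic perf_* fields and, for tracepoint and
 * bpf-output events, the payload fields, then appends the event to the
 * per-CPU stream (flushing it first if it grew past the threshold).
 */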

static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)	\
do {							\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)					\
		return -1;				\
} while(0)

#define __FUNC_PROCESS_NON_SAMPLE(_name, body)			\
static int process_##_name##_event(struct perf_tool *tool,	\
				   union perf_event *_event,	\
				   struct perf_sample *sample,	\
				   struct machine *machine)	\
{								\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;			\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;				\
	struct ctf_stream *cs;					\
	int ret;						\
								\
	c->non_sample_count++;					\
	c->events_size += _event->header.size;			\
	event = bt_ctf_event_create(event_class);		\
	if (!event) {						\
		pr_err("Failed to create a CTF event\n");	\
		return -1;					\
	}							\
								\
	bt_ctf_clock_set_time(cw->clock, sample->time);		\
	body							\
	cs = ctf_stream(cw, 0);					\
	if (cs) {						\
		if (is_flush_needed(cs))			\
			ctf_stream__flush(cs);			\
								\
		cs->count++;					\
		bt_ctf_stream_append_event(cs->stream, event);	\
	}							\
	bt_ctf_event_put(event);				\
								\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* PERF_RECORD_EXIT carries the same payload as PERF_RECORD_FORK, hence the "fork" fields. */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updating may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}
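
/*
 * Example (illustrative): a tracepoint field named "event" collides with
 * a CTF keyword, so bt_ctf_validate_identifier() rejects it and
 * event_class_add_field() below renames it to "_event". If that name is
 * already used inside the event class, it becomes "event_dupl_1",
 * "event_dupl_2", ... (at most nine duplicates are tried).
 */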

static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
						    (char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2(" field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & FIELD_IS_STRING)
			flags &= ~FIELD_IS_ARRAY;

		if (flags & FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct perf_evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct format_field *common_fields = evsel->tp_format->format.common_fields;
	struct format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}
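
/*
 * Sketch of the CTF fields that add_bpf_output_types() below adds to a
 * bpf output event class (TSDL-like notation, for illustration only):
 *
 *   integer { size = 32; }           raw_len;
 *   integer { size = 32; base = x; } raw_data[raw_len];
 */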

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2(" field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}

static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}
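
/*
 * The classes below describe the non-sample events (comm, fork, exit)
 * that are converted in addition to samples when the user asks for all
 * events (opts->all, the --all switch of 'perf data convert', which
 * makes bt_convert__perf2ctf() install the comm/exit/fork handlers).
 */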

#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {									\
		pr2(" field '%s'\n", #n);					\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s'\n", #n);		\
			return -1;						\
		}								\
	} while(0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)				\
static int add_##_name##_event(struct ctf_writer *cw)				\
{										\
	struct bt_ctf_event_class *event_class;					\
	int ret;								\
										\
	pr("Adding "#_name" event\n");						\
	event_class = bt_ctf_event_class_create("perf_" #_name);		\
	if (!event_class)							\
		return -1;							\
	body									\
										\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {								\
		pr("Failed to add event class '"#_name"' into stream.\n");	\
		return ret;							\
	}									\
										\
	cw->_name##_class = event_class;					\
	bt_ctf_event_class_put(event_class);					\
	return 0;								\
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	perf_evlist__delete(evlist);
	session->evlist = NULL;
}

static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file,
	 * if not present fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream     = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	free(cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
do {									\
	if (bt_ctf_writer_add_environment_field(writer, __n, __v))	\
		return -1;						\
} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

/*
 * perf timestamps are in nanoseconds, hence the 1 GHz clock frequency
 * below; process_sample_event() then feeds the raw sample->time value
 * to bt_ctf_clock_set_time().
 */
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)				\
do {						\
	if (bt_ctf_clock_set_##__n(clock, __v))	\
		return -1;			\
} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)		\
do {							\
	(type) = create_int_type(size, sign, hex);	\
	if (!(type))					\
		goto err;				\
} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int				 ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size")) {
		c->queue_size = perf_config_u64(var, value);
		return 0;
	}

	return 0;
}
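
/*
 * Typical usage (for reference; see perf-data(1) for the authoritative
 * documentation):
 *
 *   $ perf record -e ...                      # produces perf.data
 *   $ perf data convert --to-ctf=./ctf-data
 *   $ babeltrace ./ctf-data                   # or any other CTF reader,
 *                                             # e.g. Trace Compass
 *
 * The ordered-events queue size used during conversion can be tuned via
 * the "convert.queue-size" perf config variable (see convert__config()
 * above).
 */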

int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data_file file = {
		.path  = input,
		.mode  = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.namespaces      = perf_event__process_namespaces,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&file, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}