1 /* 2 * CTF writing support via babeltrace. 3 * 4 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com> 5 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de> 6 * 7 * Released under the GPL v2. (and only v2, not any later version) 8 */ 9 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <linux/compiler.h> 13 #include <linux/kernel.h> 14 #include <babeltrace/ctf-writer/writer.h> 15 #include <babeltrace/ctf-writer/clock.h> 16 #include <babeltrace/ctf-writer/stream.h> 17 #include <babeltrace/ctf-writer/event.h> 18 #include <babeltrace/ctf-writer/event-types.h> 19 #include <babeltrace/ctf-writer/event-fields.h> 20 #include <babeltrace/ctf-ir/utils.h> 21 #include <babeltrace/ctf/events.h> 22 #include <traceevent/event-parse.h> 23 #include "asm/bug.h" 24 #include "data-convert-bt.h" 25 #include "session.h" 26 #include "util.h" 27 #include "debug.h" 28 #include "tool.h" 29 #include "evlist.h" 30 #include "evsel.h" 31 #include "machine.h" 32 #include "config.h" 33 #include "sane_ctype.h" 34 35 #define pr_N(n, fmt, ...) \ 36 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) 37 38 #define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__) 39 #define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__) 40 41 #define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__) 42 43 struct evsel_priv { 44 struct bt_ctf_event_class *event_class; 45 }; 46 47 #define MAX_CPUS 4096 48 49 struct ctf_stream { 50 struct bt_ctf_stream *stream; 51 int cpu; 52 u32 count; 53 }; 54 55 struct ctf_writer { 56 /* writer primitives */ 57 struct bt_ctf_writer *writer; 58 struct ctf_stream **stream; 59 int stream_cnt; 60 struct bt_ctf_stream_class *stream_class; 61 struct bt_ctf_clock *clock; 62 63 /* data types */ 64 union { 65 struct { 66 struct bt_ctf_field_type *s64; 67 struct bt_ctf_field_type *u64; 68 struct bt_ctf_field_type *s32; 69 struct bt_ctf_field_type *u32; 70 struct bt_ctf_field_type *string; 71 struct bt_ctf_field_type *u32_hex; 72 struct bt_ctf_field_type *u64_hex; 73 }; 74 struct bt_ctf_field_type *array[6]; 75 } data; 76 struct bt_ctf_event_class *comm_class; 77 struct bt_ctf_event_class *exit_class; 78 struct bt_ctf_event_class *fork_class; 79 }; 80 81 struct convert { 82 struct perf_tool tool; 83 struct ctf_writer writer; 84 85 u64 events_size; 86 u64 events_count; 87 u64 non_sample_count; 88 89 /* Ordered events configured queue size. */ 90 u64 queue_size; 91 }; 92 93 static int value_set(struct bt_ctf_field_type *type, 94 struct bt_ctf_event *event, 95 const char *name, u64 val) 96 { 97 struct bt_ctf_field *field; 98 bool sign = bt_ctf_field_type_integer_get_signed(type); 99 int ret; 100 101 field = bt_ctf_field_create(type); 102 if (!field) { 103 pr_err("failed to create a field %s\n", name); 104 return -1; 105 } 106 107 if (sign) { 108 ret = bt_ctf_field_signed_integer_set_value(field, val); 109 if (ret) { 110 pr_err("failed to set field value %s\n", name); 111 goto err; 112 } 113 } else { 114 ret = bt_ctf_field_unsigned_integer_set_value(field, val); 115 if (ret) { 116 pr_err("failed to set field value %s\n", name); 117 goto err; 118 } 119 } 120 121 ret = bt_ctf_event_set_payload(event, name, field); 122 if (ret) { 123 pr_err("failed to set payload %s\n", name); 124 goto err; 125 } 126 127 pr2(" SET [%s = %" PRIu64 "]\n", name, val); 128 129 err: 130 bt_ctf_field_put(field); 131 return ret; 132 } 133 134 #define __FUNC_VALUE_SET(_name, _val_type) \ 135 static __maybe_unused int value_set_##_name(struct ctf_writer *cw, \ 136 struct bt_ctf_event *event, \ 137 const char *name, \ 138 _val_type val) \ 139 { \ 140 struct bt_ctf_field_type *type = cw->data._name; \ 141 return value_set(type, event, name, (u64) val); \ 142 } 143 144 #define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name) 145 146 FUNC_VALUE_SET(s32) 147 FUNC_VALUE_SET(u32) 148 FUNC_VALUE_SET(s64) 149 FUNC_VALUE_SET(u64) 150 __FUNC_VALUE_SET(u64_hex, u64) 151 152 static int string_set_value(struct bt_ctf_field *field, const char *string); 153 static __maybe_unused int 154 value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event, 155 const char *name, const char *string) 156 { 157 struct bt_ctf_field_type *type = cw->data.string; 158 struct bt_ctf_field *field; 159 int ret = 0; 160 161 field = bt_ctf_field_create(type); 162 if (!field) { 163 pr_err("failed to create a field %s\n", name); 164 return -1; 165 } 166 167 ret = string_set_value(field, string); 168 if (ret) { 169 pr_err("failed to set value %s\n", name); 170 goto err_put_field; 171 } 172 173 ret = bt_ctf_event_set_payload(event, name, field); 174 if (ret) 175 pr_err("failed to set payload %s\n", name); 176 177 err_put_field: 178 bt_ctf_field_put(field); 179 return ret; 180 } 181 182 static struct bt_ctf_field_type* 183 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field) 184 { 185 unsigned long flags = field->flags; 186 187 if (flags & FIELD_IS_STRING) 188 return cw->data.string; 189 190 if (!(flags & FIELD_IS_SIGNED)) { 191 /* unsigned long are mostly pointers */ 192 if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER) 193 return cw->data.u64_hex; 194 } 195 196 if (flags & FIELD_IS_SIGNED) { 197 if (field->size == 8) 198 return cw->data.s64; 199 else 200 return cw->data.s32; 201 } 202 203 if (field->size == 8) 204 return cw->data.u64; 205 else 206 return cw->data.u32; 207 } 208 209 static unsigned long long adjust_signedness(unsigned long long value_int, int size) 210 { 211 unsigned long long value_mask; 212 213 /* 214 * value_mask = (1 << (size * 8 - 1)) - 1. 215 * Directly set value_mask for code readers. 216 */ 217 switch (size) { 218 case 1: 219 value_mask = 0x7fULL; 220 break; 221 case 2: 222 value_mask = 0x7fffULL; 223 break; 224 case 4: 225 value_mask = 0x7fffffffULL; 226 break; 227 case 8: 228 /* 229 * For 64 bit value, return it self. There is no need 230 * to fill high bit. 231 */ 232 /* Fall through */ 233 default: 234 /* BUG! */ 235 return value_int; 236 } 237 238 /* If it is a positive value, don't adjust. */ 239 if ((value_int & (~0ULL - value_mask)) == 0) 240 return value_int; 241 242 /* Fill upper part of value_int with 1 to make it a negative long long. */ 243 return (value_int & value_mask) | ~value_mask; 244 } 245 246 static int string_set_value(struct bt_ctf_field *field, const char *string) 247 { 248 char *buffer = NULL; 249 size_t len = strlen(string), i, p; 250 int err; 251 252 for (i = p = 0; i < len; i++, p++) { 253 if (isprint(string[i])) { 254 if (!buffer) 255 continue; 256 buffer[p] = string[i]; 257 } else { 258 char numstr[5]; 259 260 snprintf(numstr, sizeof(numstr), "\\x%02x", 261 (unsigned int)(string[i]) & 0xff); 262 263 if (!buffer) { 264 buffer = zalloc(i + (len - i) * 4 + 2); 265 if (!buffer) { 266 pr_err("failed to set unprintable string '%s'\n", string); 267 return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING"); 268 } 269 if (i > 0) 270 strncpy(buffer, string, i); 271 } 272 strncat(buffer + p, numstr, 4); 273 p += 3; 274 } 275 } 276 277 if (!buffer) 278 return bt_ctf_field_string_set_value(field, string); 279 err = bt_ctf_field_string_set_value(field, buffer); 280 free(buffer); 281 return err; 282 } 283 284 static int add_tracepoint_field_value(struct ctf_writer *cw, 285 struct bt_ctf_event_class *event_class, 286 struct bt_ctf_event *event, 287 struct perf_sample *sample, 288 struct format_field *fmtf) 289 { 290 struct bt_ctf_field_type *type; 291 struct bt_ctf_field *array_field; 292 struct bt_ctf_field *field; 293 const char *name = fmtf->name; 294 void *data = sample->raw_data; 295 unsigned long flags = fmtf->flags; 296 unsigned int n_items; 297 unsigned int i; 298 unsigned int offset; 299 unsigned int len; 300 int ret; 301 302 name = fmtf->alias; 303 offset = fmtf->offset; 304 len = fmtf->size; 305 if (flags & FIELD_IS_STRING) 306 flags &= ~FIELD_IS_ARRAY; 307 308 if (flags & FIELD_IS_DYNAMIC) { 309 unsigned long long tmp_val; 310 311 tmp_val = pevent_read_number(fmtf->event->pevent, 312 data + offset, len); 313 offset = tmp_val; 314 len = offset >> 16; 315 offset &= 0xffff; 316 } 317 318 if (flags & FIELD_IS_ARRAY) { 319 320 type = bt_ctf_event_class_get_field_by_name( 321 event_class, name); 322 array_field = bt_ctf_field_create(type); 323 bt_ctf_field_type_put(type); 324 if (!array_field) { 325 pr_err("Failed to create array type %s\n", name); 326 return -1; 327 } 328 329 len = fmtf->size / fmtf->arraylen; 330 n_items = fmtf->arraylen; 331 } else { 332 n_items = 1; 333 array_field = NULL; 334 } 335 336 type = get_tracepoint_field_type(cw, fmtf); 337 338 for (i = 0; i < n_items; i++) { 339 if (flags & FIELD_IS_ARRAY) 340 field = bt_ctf_field_array_get_field(array_field, i); 341 else 342 field = bt_ctf_field_create(type); 343 344 if (!field) { 345 pr_err("failed to create a field %s\n", name); 346 return -1; 347 } 348 349 if (flags & FIELD_IS_STRING) 350 ret = string_set_value(field, data + offset + i * len); 351 else { 352 unsigned long long value_int; 353 354 value_int = pevent_read_number( 355 fmtf->event->pevent, 356 data + offset + i * len, len); 357 358 if (!(flags & FIELD_IS_SIGNED)) 359 ret = bt_ctf_field_unsigned_integer_set_value( 360 field, value_int); 361 else 362 ret = bt_ctf_field_signed_integer_set_value( 363 field, adjust_signedness(value_int, len)); 364 } 365 366 if (ret) { 367 pr_err("failed to set file value %s\n", name); 368 goto err_put_field; 369 } 370 if (!(flags & FIELD_IS_ARRAY)) { 371 ret = bt_ctf_event_set_payload(event, name, field); 372 if (ret) { 373 pr_err("failed to set payload %s\n", name); 374 goto err_put_field; 375 } 376 } 377 bt_ctf_field_put(field); 378 } 379 if (flags & FIELD_IS_ARRAY) { 380 ret = bt_ctf_event_set_payload(event, name, array_field); 381 if (ret) { 382 pr_err("Failed add payload array %s\n", name); 383 return -1; 384 } 385 bt_ctf_field_put(array_field); 386 } 387 return 0; 388 389 err_put_field: 390 bt_ctf_field_put(field); 391 return -1; 392 } 393 394 static int add_tracepoint_fields_values(struct ctf_writer *cw, 395 struct bt_ctf_event_class *event_class, 396 struct bt_ctf_event *event, 397 struct format_field *fields, 398 struct perf_sample *sample) 399 { 400 struct format_field *field; 401 int ret; 402 403 for (field = fields; field; field = field->next) { 404 ret = add_tracepoint_field_value(cw, event_class, event, sample, 405 field); 406 if (ret) 407 return -1; 408 } 409 return 0; 410 } 411 412 static int add_tracepoint_values(struct ctf_writer *cw, 413 struct bt_ctf_event_class *event_class, 414 struct bt_ctf_event *event, 415 struct perf_evsel *evsel, 416 struct perf_sample *sample) 417 { 418 struct format_field *common_fields = evsel->tp_format->format.common_fields; 419 struct format_field *fields = evsel->tp_format->format.fields; 420 int ret; 421 422 ret = add_tracepoint_fields_values(cw, event_class, event, 423 common_fields, sample); 424 if (!ret) 425 ret = add_tracepoint_fields_values(cw, event_class, event, 426 fields, sample); 427 428 return ret; 429 } 430 431 static int 432 add_bpf_output_values(struct bt_ctf_event_class *event_class, 433 struct bt_ctf_event *event, 434 struct perf_sample *sample) 435 { 436 struct bt_ctf_field_type *len_type, *seq_type; 437 struct bt_ctf_field *len_field, *seq_field; 438 unsigned int raw_size = sample->raw_size; 439 unsigned int nr_elements = raw_size / sizeof(u32); 440 unsigned int i; 441 int ret; 442 443 if (nr_elements * sizeof(u32) != raw_size) 444 pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n", 445 raw_size, nr_elements * sizeof(u32) - raw_size); 446 447 len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len"); 448 len_field = bt_ctf_field_create(len_type); 449 if (!len_field) { 450 pr_err("failed to create 'raw_len' for bpf output event\n"); 451 ret = -1; 452 goto put_len_type; 453 } 454 455 ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements); 456 if (ret) { 457 pr_err("failed to set field value for raw_len\n"); 458 goto put_len_field; 459 } 460 ret = bt_ctf_event_set_payload(event, "raw_len", len_field); 461 if (ret) { 462 pr_err("failed to set payload to raw_len\n"); 463 goto put_len_field; 464 } 465 466 seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data"); 467 seq_field = bt_ctf_field_create(seq_type); 468 if (!seq_field) { 469 pr_err("failed to create 'raw_data' for bpf output event\n"); 470 ret = -1; 471 goto put_seq_type; 472 } 473 474 ret = bt_ctf_field_sequence_set_length(seq_field, len_field); 475 if (ret) { 476 pr_err("failed to set length of 'raw_data'\n"); 477 goto put_seq_field; 478 } 479 480 for (i = 0; i < nr_elements; i++) { 481 struct bt_ctf_field *elem_field = 482 bt_ctf_field_sequence_get_field(seq_field, i); 483 484 ret = bt_ctf_field_unsigned_integer_set_value(elem_field, 485 ((u32 *)(sample->raw_data))[i]); 486 487 bt_ctf_field_put(elem_field); 488 if (ret) { 489 pr_err("failed to set raw_data[%d]\n", i); 490 goto put_seq_field; 491 } 492 } 493 494 ret = bt_ctf_event_set_payload(event, "raw_data", seq_field); 495 if (ret) 496 pr_err("failed to set payload for raw_data\n"); 497 498 put_seq_field: 499 bt_ctf_field_put(seq_field); 500 put_seq_type: 501 bt_ctf_field_type_put(seq_type); 502 put_len_field: 503 bt_ctf_field_put(len_field); 504 put_len_type: 505 bt_ctf_field_type_put(len_type); 506 return ret; 507 } 508 509 static int add_generic_values(struct ctf_writer *cw, 510 struct bt_ctf_event *event, 511 struct perf_evsel *evsel, 512 struct perf_sample *sample) 513 { 514 u64 type = evsel->attr.sample_type; 515 int ret; 516 517 /* 518 * missing: 519 * PERF_SAMPLE_TIME - not needed as we have it in 520 * ctf event header 521 * PERF_SAMPLE_READ - TODO 522 * PERF_SAMPLE_CALLCHAIN - TODO 523 * PERF_SAMPLE_RAW - tracepoint fields are handled separately 524 * PERF_SAMPLE_BRANCH_STACK - TODO 525 * PERF_SAMPLE_REGS_USER - TODO 526 * PERF_SAMPLE_STACK_USER - TODO 527 */ 528 529 if (type & PERF_SAMPLE_IP) { 530 ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip); 531 if (ret) 532 return -1; 533 } 534 535 if (type & PERF_SAMPLE_TID) { 536 ret = value_set_s32(cw, event, "perf_tid", sample->tid); 537 if (ret) 538 return -1; 539 540 ret = value_set_s32(cw, event, "perf_pid", sample->pid); 541 if (ret) 542 return -1; 543 } 544 545 if ((type & PERF_SAMPLE_ID) || 546 (type & PERF_SAMPLE_IDENTIFIER)) { 547 ret = value_set_u64(cw, event, "perf_id", sample->id); 548 if (ret) 549 return -1; 550 } 551 552 if (type & PERF_SAMPLE_STREAM_ID) { 553 ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id); 554 if (ret) 555 return -1; 556 } 557 558 if (type & PERF_SAMPLE_PERIOD) { 559 ret = value_set_u64(cw, event, "perf_period", sample->period); 560 if (ret) 561 return -1; 562 } 563 564 if (type & PERF_SAMPLE_WEIGHT) { 565 ret = value_set_u64(cw, event, "perf_weight", sample->weight); 566 if (ret) 567 return -1; 568 } 569 570 if (type & PERF_SAMPLE_DATA_SRC) { 571 ret = value_set_u64(cw, event, "perf_data_src", 572 sample->data_src); 573 if (ret) 574 return -1; 575 } 576 577 if (type & PERF_SAMPLE_TRANSACTION) { 578 ret = value_set_u64(cw, event, "perf_transaction", 579 sample->transaction); 580 if (ret) 581 return -1; 582 } 583 584 return 0; 585 } 586 587 static int ctf_stream__flush(struct ctf_stream *cs) 588 { 589 int err = 0; 590 591 if (cs) { 592 err = bt_ctf_stream_flush(cs->stream); 593 if (err) 594 pr_err("CTF stream %d flush failed\n", cs->cpu); 595 596 pr("Flush stream for cpu %d (%u samples)\n", 597 cs->cpu, cs->count); 598 599 cs->count = 0; 600 } 601 602 return err; 603 } 604 605 static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu) 606 { 607 struct ctf_stream *cs; 608 struct bt_ctf_field *pkt_ctx = NULL; 609 struct bt_ctf_field *cpu_field = NULL; 610 struct bt_ctf_stream *stream = NULL; 611 int ret; 612 613 cs = zalloc(sizeof(*cs)); 614 if (!cs) { 615 pr_err("Failed to allocate ctf stream\n"); 616 return NULL; 617 } 618 619 stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class); 620 if (!stream) { 621 pr_err("Failed to create CTF stream\n"); 622 goto out; 623 } 624 625 pkt_ctx = bt_ctf_stream_get_packet_context(stream); 626 if (!pkt_ctx) { 627 pr_err("Failed to obtain packet context\n"); 628 goto out; 629 } 630 631 cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id"); 632 bt_ctf_field_put(pkt_ctx); 633 if (!cpu_field) { 634 pr_err("Failed to obtain cpu field\n"); 635 goto out; 636 } 637 638 ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu); 639 if (ret) { 640 pr_err("Failed to update CPU number\n"); 641 goto out; 642 } 643 644 bt_ctf_field_put(cpu_field); 645 646 cs->cpu = cpu; 647 cs->stream = stream; 648 return cs; 649 650 out: 651 if (cpu_field) 652 bt_ctf_field_put(cpu_field); 653 if (stream) 654 bt_ctf_stream_put(stream); 655 656 free(cs); 657 return NULL; 658 } 659 660 static void ctf_stream__delete(struct ctf_stream *cs) 661 { 662 if (cs) { 663 bt_ctf_stream_put(cs->stream); 664 free(cs); 665 } 666 } 667 668 static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu) 669 { 670 struct ctf_stream *cs = cw->stream[cpu]; 671 672 if (!cs) { 673 cs = ctf_stream__create(cw, cpu); 674 cw->stream[cpu] = cs; 675 } 676 677 return cs; 678 } 679 680 static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample, 681 struct perf_evsel *evsel) 682 { 683 int cpu = 0; 684 685 if (evsel->attr.sample_type & PERF_SAMPLE_CPU) 686 cpu = sample->cpu; 687 688 if (cpu > cw->stream_cnt) { 689 pr_err("Event was recorded for CPU %d, limit is at %d.\n", 690 cpu, cw->stream_cnt); 691 cpu = 0; 692 } 693 694 return cpu; 695 } 696 697 #define STREAM_FLUSH_COUNT 100000 698 699 /* 700 * Currently we have no other way to determine the 701 * time for the stream flush other than keep track 702 * of the number of events and check it against 703 * threshold. 704 */ 705 static bool is_flush_needed(struct ctf_stream *cs) 706 { 707 return cs->count >= STREAM_FLUSH_COUNT; 708 } 709 710 static int process_sample_event(struct perf_tool *tool, 711 union perf_event *_event, 712 struct perf_sample *sample, 713 struct perf_evsel *evsel, 714 struct machine *machine __maybe_unused) 715 { 716 struct convert *c = container_of(tool, struct convert, tool); 717 struct evsel_priv *priv = evsel->priv; 718 struct ctf_writer *cw = &c->writer; 719 struct ctf_stream *cs; 720 struct bt_ctf_event_class *event_class; 721 struct bt_ctf_event *event; 722 int ret; 723 724 if (WARN_ONCE(!priv, "Failed to setup all events.\n")) 725 return 0; 726 727 event_class = priv->event_class; 728 729 /* update stats */ 730 c->events_count++; 731 c->events_size += _event->header.size; 732 733 pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count); 734 735 event = bt_ctf_event_create(event_class); 736 if (!event) { 737 pr_err("Failed to create an CTF event\n"); 738 return -1; 739 } 740 741 bt_ctf_clock_set_time(cw->clock, sample->time); 742 743 ret = add_generic_values(cw, event, evsel, sample); 744 if (ret) 745 return -1; 746 747 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { 748 ret = add_tracepoint_values(cw, event_class, event, 749 evsel, sample); 750 if (ret) 751 return -1; 752 } 753 754 if (perf_evsel__is_bpf_output(evsel)) { 755 ret = add_bpf_output_values(event_class, event, sample); 756 if (ret) 757 return -1; 758 } 759 760 cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel)); 761 if (cs) { 762 if (is_flush_needed(cs)) 763 ctf_stream__flush(cs); 764 765 cs->count++; 766 bt_ctf_stream_append_event(cs->stream, event); 767 } 768 769 bt_ctf_event_put(event); 770 return cs ? 0 : -1; 771 } 772 773 #define __NON_SAMPLE_SET_FIELD(_name, _type, _field) \ 774 do { \ 775 ret = value_set_##_type(cw, event, #_field, _event->_name._field);\ 776 if (ret) \ 777 return -1; \ 778 } while(0) 779 780 #define __FUNC_PROCESS_NON_SAMPLE(_name, body) \ 781 static int process_##_name##_event(struct perf_tool *tool, \ 782 union perf_event *_event, \ 783 struct perf_sample *sample, \ 784 struct machine *machine) \ 785 { \ 786 struct convert *c = container_of(tool, struct convert, tool);\ 787 struct ctf_writer *cw = &c->writer; \ 788 struct bt_ctf_event_class *event_class = cw->_name##_class;\ 789 struct bt_ctf_event *event; \ 790 struct ctf_stream *cs; \ 791 int ret; \ 792 \ 793 c->non_sample_count++; \ 794 c->events_size += _event->header.size; \ 795 event = bt_ctf_event_create(event_class); \ 796 if (!event) { \ 797 pr_err("Failed to create an CTF event\n"); \ 798 return -1; \ 799 } \ 800 \ 801 bt_ctf_clock_set_time(cw->clock, sample->time); \ 802 body \ 803 cs = ctf_stream(cw, 0); \ 804 if (cs) { \ 805 if (is_flush_needed(cs)) \ 806 ctf_stream__flush(cs); \ 807 \ 808 cs->count++; \ 809 bt_ctf_stream_append_event(cs->stream, event); \ 810 } \ 811 bt_ctf_event_put(event); \ 812 \ 813 return perf_event__process_##_name(tool, _event, sample, machine);\ 814 } 815 816 __FUNC_PROCESS_NON_SAMPLE(comm, 817 __NON_SAMPLE_SET_FIELD(comm, u32, pid); 818 __NON_SAMPLE_SET_FIELD(comm, u32, tid); 819 __NON_SAMPLE_SET_FIELD(comm, string, comm); 820 ) 821 __FUNC_PROCESS_NON_SAMPLE(fork, 822 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 823 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 824 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 825 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 826 __NON_SAMPLE_SET_FIELD(fork, u64, time); 827 ) 828 829 __FUNC_PROCESS_NON_SAMPLE(exit, 830 __NON_SAMPLE_SET_FIELD(fork, u32, pid); 831 __NON_SAMPLE_SET_FIELD(fork, u32, ppid); 832 __NON_SAMPLE_SET_FIELD(fork, u32, tid); 833 __NON_SAMPLE_SET_FIELD(fork, u32, ptid); 834 __NON_SAMPLE_SET_FIELD(fork, u64, time); 835 ) 836 #undef __NON_SAMPLE_SET_FIELD 837 #undef __FUNC_PROCESS_NON_SAMPLE 838 839 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */ 840 static char *change_name(char *name, char *orig_name, int dup) 841 { 842 char *new_name = NULL; 843 size_t len; 844 845 if (!name) 846 name = orig_name; 847 848 if (dup >= 10) 849 goto out; 850 /* 851 * Add '_' prefix to potential keywork. According to 852 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652), 853 * futher CTF spec updating may require us to use '$'. 854 */ 855 if (dup < 0) 856 len = strlen(name) + sizeof("_"); 857 else 858 len = strlen(orig_name) + sizeof("_dupl_X"); 859 860 new_name = malloc(len); 861 if (!new_name) 862 goto out; 863 864 if (dup < 0) 865 snprintf(new_name, len, "_%s", name); 866 else 867 snprintf(new_name, len, "%s_dupl_%d", orig_name, dup); 868 869 out: 870 if (name != orig_name) 871 free(name); 872 return new_name; 873 } 874 875 static int event_class_add_field(struct bt_ctf_event_class *event_class, 876 struct bt_ctf_field_type *type, 877 struct format_field *field) 878 { 879 struct bt_ctf_field_type *t = NULL; 880 char *name; 881 int dup = 1; 882 int ret; 883 884 /* alias was already assigned */ 885 if (field->alias != field->name) 886 return bt_ctf_event_class_add_field(event_class, type, 887 (char *)field->alias); 888 889 name = field->name; 890 891 /* If 'name' is a keywork, add prefix. */ 892 if (bt_ctf_validate_identifier(name)) 893 name = change_name(name, field->name, -1); 894 895 if (!name) { 896 pr_err("Failed to fix invalid identifier."); 897 return -1; 898 } 899 while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) { 900 bt_ctf_field_type_put(t); 901 name = change_name(name, field->name, dup++); 902 if (!name) { 903 pr_err("Failed to create dup name for '%s'\n", field->name); 904 return -1; 905 } 906 } 907 908 ret = bt_ctf_event_class_add_field(event_class, type, name); 909 if (!ret) 910 field->alias = name; 911 912 return ret; 913 } 914 915 static int add_tracepoint_fields_types(struct ctf_writer *cw, 916 struct format_field *fields, 917 struct bt_ctf_event_class *event_class) 918 { 919 struct format_field *field; 920 int ret; 921 922 for (field = fields; field; field = field->next) { 923 struct bt_ctf_field_type *type; 924 unsigned long flags = field->flags; 925 926 pr2(" field '%s'\n", field->name); 927 928 type = get_tracepoint_field_type(cw, field); 929 if (!type) 930 return -1; 931 932 /* 933 * A string is an array of chars. For this we use the string 934 * type and don't care that it is an array. What we don't 935 * support is an array of strings. 936 */ 937 if (flags & FIELD_IS_STRING) 938 flags &= ~FIELD_IS_ARRAY; 939 940 if (flags & FIELD_IS_ARRAY) 941 type = bt_ctf_field_type_array_create(type, field->arraylen); 942 943 ret = event_class_add_field(event_class, type, field); 944 945 if (flags & FIELD_IS_ARRAY) 946 bt_ctf_field_type_put(type); 947 948 if (ret) { 949 pr_err("Failed to add field '%s': %d\n", 950 field->name, ret); 951 return -1; 952 } 953 } 954 955 return 0; 956 } 957 958 static int add_tracepoint_types(struct ctf_writer *cw, 959 struct perf_evsel *evsel, 960 struct bt_ctf_event_class *class) 961 { 962 struct format_field *common_fields = evsel->tp_format->format.common_fields; 963 struct format_field *fields = evsel->tp_format->format.fields; 964 int ret; 965 966 ret = add_tracepoint_fields_types(cw, common_fields, class); 967 if (!ret) 968 ret = add_tracepoint_fields_types(cw, fields, class); 969 970 return ret; 971 } 972 973 static int add_bpf_output_types(struct ctf_writer *cw, 974 struct bt_ctf_event_class *class) 975 { 976 struct bt_ctf_field_type *len_type = cw->data.u32; 977 struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex; 978 struct bt_ctf_field_type *seq_type; 979 int ret; 980 981 ret = bt_ctf_event_class_add_field(class, len_type, "raw_len"); 982 if (ret) 983 return ret; 984 985 seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len"); 986 if (!seq_type) 987 return -1; 988 989 return bt_ctf_event_class_add_field(class, seq_type, "raw_data"); 990 } 991 992 static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel, 993 struct bt_ctf_event_class *event_class) 994 { 995 u64 type = evsel->attr.sample_type; 996 997 /* 998 * missing: 999 * PERF_SAMPLE_TIME - not needed as we have it in 1000 * ctf event header 1001 * PERF_SAMPLE_READ - TODO 1002 * PERF_SAMPLE_CALLCHAIN - TODO 1003 * PERF_SAMPLE_RAW - tracepoint fields and BPF output 1004 * are handled separately 1005 * PERF_SAMPLE_BRANCH_STACK - TODO 1006 * PERF_SAMPLE_REGS_USER - TODO 1007 * PERF_SAMPLE_STACK_USER - TODO 1008 */ 1009 1010 #define ADD_FIELD(cl, t, n) \ 1011 do { \ 1012 pr2(" field '%s'\n", n); \ 1013 if (bt_ctf_event_class_add_field(cl, t, n)) { \ 1014 pr_err("Failed to add field '%s';\n", n); \ 1015 return -1; \ 1016 } \ 1017 } while (0) 1018 1019 if (type & PERF_SAMPLE_IP) 1020 ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip"); 1021 1022 if (type & PERF_SAMPLE_TID) { 1023 ADD_FIELD(event_class, cw->data.s32, "perf_tid"); 1024 ADD_FIELD(event_class, cw->data.s32, "perf_pid"); 1025 } 1026 1027 if ((type & PERF_SAMPLE_ID) || 1028 (type & PERF_SAMPLE_IDENTIFIER)) 1029 ADD_FIELD(event_class, cw->data.u64, "perf_id"); 1030 1031 if (type & PERF_SAMPLE_STREAM_ID) 1032 ADD_FIELD(event_class, cw->data.u64, "perf_stream_id"); 1033 1034 if (type & PERF_SAMPLE_PERIOD) 1035 ADD_FIELD(event_class, cw->data.u64, "perf_period"); 1036 1037 if (type & PERF_SAMPLE_WEIGHT) 1038 ADD_FIELD(event_class, cw->data.u64, "perf_weight"); 1039 1040 if (type & PERF_SAMPLE_DATA_SRC) 1041 ADD_FIELD(event_class, cw->data.u64, "perf_data_src"); 1042 1043 if (type & PERF_SAMPLE_TRANSACTION) 1044 ADD_FIELD(event_class, cw->data.u64, "perf_transaction"); 1045 1046 #undef ADD_FIELD 1047 return 0; 1048 } 1049 1050 static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel) 1051 { 1052 struct bt_ctf_event_class *event_class; 1053 struct evsel_priv *priv; 1054 const char *name = perf_evsel__name(evsel); 1055 int ret; 1056 1057 pr("Adding event '%s' (type %d)\n", name, evsel->attr.type); 1058 1059 event_class = bt_ctf_event_class_create(name); 1060 if (!event_class) 1061 return -1; 1062 1063 ret = add_generic_types(cw, evsel, event_class); 1064 if (ret) 1065 goto err; 1066 1067 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { 1068 ret = add_tracepoint_types(cw, evsel, event_class); 1069 if (ret) 1070 goto err; 1071 } 1072 1073 if (perf_evsel__is_bpf_output(evsel)) { 1074 ret = add_bpf_output_types(cw, event_class); 1075 if (ret) 1076 goto err; 1077 } 1078 1079 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class); 1080 if (ret) { 1081 pr("Failed to add event class into stream.\n"); 1082 goto err; 1083 } 1084 1085 priv = malloc(sizeof(*priv)); 1086 if (!priv) 1087 goto err; 1088 1089 priv->event_class = event_class; 1090 evsel->priv = priv; 1091 return 0; 1092 1093 err: 1094 bt_ctf_event_class_put(event_class); 1095 pr_err("Failed to add event '%s'.\n", name); 1096 return -1; 1097 } 1098 1099 static int setup_events(struct ctf_writer *cw, struct perf_session *session) 1100 { 1101 struct perf_evlist *evlist = session->evlist; 1102 struct perf_evsel *evsel; 1103 int ret; 1104 1105 evlist__for_each_entry(evlist, evsel) { 1106 ret = add_event(cw, evsel); 1107 if (ret) 1108 return ret; 1109 } 1110 return 0; 1111 } 1112 1113 #define __NON_SAMPLE_ADD_FIELD(t, n) \ 1114 do { \ 1115 pr2(" field '%s'\n", #n); \ 1116 if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\ 1117 pr_err("Failed to add field '%s';\n", #n);\ 1118 return -1; \ 1119 } \ 1120 } while(0) 1121 1122 #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \ 1123 static int add_##_name##_event(struct ctf_writer *cw) \ 1124 { \ 1125 struct bt_ctf_event_class *event_class; \ 1126 int ret; \ 1127 \ 1128 pr("Adding "#_name" event\n"); \ 1129 event_class = bt_ctf_event_class_create("perf_" #_name);\ 1130 if (!event_class) \ 1131 return -1; \ 1132 body \ 1133 \ 1134 ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\ 1135 if (ret) { \ 1136 pr("Failed to add event class '"#_name"' into stream.\n");\ 1137 return ret; \ 1138 } \ 1139 \ 1140 cw->_name##_class = event_class; \ 1141 bt_ctf_event_class_put(event_class); \ 1142 return 0; \ 1143 } 1144 1145 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm, 1146 __NON_SAMPLE_ADD_FIELD(u32, pid); 1147 __NON_SAMPLE_ADD_FIELD(u32, tid); 1148 __NON_SAMPLE_ADD_FIELD(string, comm); 1149 ) 1150 1151 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork, 1152 __NON_SAMPLE_ADD_FIELD(u32, pid); 1153 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1154 __NON_SAMPLE_ADD_FIELD(u32, tid); 1155 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1156 __NON_SAMPLE_ADD_FIELD(u64, time); 1157 ) 1158 1159 __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit, 1160 __NON_SAMPLE_ADD_FIELD(u32, pid); 1161 __NON_SAMPLE_ADD_FIELD(u32, ppid); 1162 __NON_SAMPLE_ADD_FIELD(u32, tid); 1163 __NON_SAMPLE_ADD_FIELD(u32, ptid); 1164 __NON_SAMPLE_ADD_FIELD(u64, time); 1165 ) 1166 1167 #undef __NON_SAMPLE_ADD_FIELD 1168 #undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS 1169 1170 static int setup_non_sample_events(struct ctf_writer *cw, 1171 struct perf_session *session __maybe_unused) 1172 { 1173 int ret; 1174 1175 ret = add_comm_event(cw); 1176 if (ret) 1177 return ret; 1178 ret = add_exit_event(cw); 1179 if (ret) 1180 return ret; 1181 ret = add_fork_event(cw); 1182 if (ret) 1183 return ret; 1184 return 0; 1185 } 1186 1187 static void cleanup_events(struct perf_session *session) 1188 { 1189 struct perf_evlist *evlist = session->evlist; 1190 struct perf_evsel *evsel; 1191 1192 evlist__for_each_entry(evlist, evsel) { 1193 struct evsel_priv *priv; 1194 1195 priv = evsel->priv; 1196 bt_ctf_event_class_put(priv->event_class); 1197 zfree(&evsel->priv); 1198 } 1199 1200 perf_evlist__delete(evlist); 1201 session->evlist = NULL; 1202 } 1203 1204 static int setup_streams(struct ctf_writer *cw, struct perf_session *session) 1205 { 1206 struct ctf_stream **stream; 1207 struct perf_header *ph = &session->header; 1208 int ncpus; 1209 1210 /* 1211 * Try to get the number of cpus used in the data file, 1212 * if not present fallback to the MAX_CPUS. 1213 */ 1214 ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS; 1215 1216 stream = zalloc(sizeof(*stream) * ncpus); 1217 if (!stream) { 1218 pr_err("Failed to allocate streams.\n"); 1219 return -ENOMEM; 1220 } 1221 1222 cw->stream = stream; 1223 cw->stream_cnt = ncpus; 1224 return 0; 1225 } 1226 1227 static void free_streams(struct ctf_writer *cw) 1228 { 1229 int cpu; 1230 1231 for (cpu = 0; cpu < cw->stream_cnt; cpu++) 1232 ctf_stream__delete(cw->stream[cpu]); 1233 1234 free(cw->stream); 1235 } 1236 1237 static int ctf_writer__setup_env(struct ctf_writer *cw, 1238 struct perf_session *session) 1239 { 1240 struct perf_header *header = &session->header; 1241 struct bt_ctf_writer *writer = cw->writer; 1242 1243 #define ADD(__n, __v) \ 1244 do { \ 1245 if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \ 1246 return -1; \ 1247 } while (0) 1248 1249 ADD("host", header->env.hostname); 1250 ADD("sysname", "Linux"); 1251 ADD("release", header->env.os_release); 1252 ADD("version", header->env.version); 1253 ADD("machine", header->env.arch); 1254 ADD("domain", "kernel"); 1255 ADD("tracer_name", "perf"); 1256 1257 #undef ADD 1258 return 0; 1259 } 1260 1261 static int ctf_writer__setup_clock(struct ctf_writer *cw) 1262 { 1263 struct bt_ctf_clock *clock = cw->clock; 1264 1265 bt_ctf_clock_set_description(clock, "perf clock"); 1266 1267 #define SET(__n, __v) \ 1268 do { \ 1269 if (bt_ctf_clock_set_##__n(clock, __v)) \ 1270 return -1; \ 1271 } while (0) 1272 1273 SET(frequency, 1000000000); 1274 SET(offset_s, 0); 1275 SET(offset, 0); 1276 SET(precision, 10); 1277 SET(is_absolute, 0); 1278 1279 #undef SET 1280 return 0; 1281 } 1282 1283 static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex) 1284 { 1285 struct bt_ctf_field_type *type; 1286 1287 type = bt_ctf_field_type_integer_create(size); 1288 if (!type) 1289 return NULL; 1290 1291 if (sign && 1292 bt_ctf_field_type_integer_set_signed(type, 1)) 1293 goto err; 1294 1295 if (hex && 1296 bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL)) 1297 goto err; 1298 1299 #if __BYTE_ORDER == __BIG_ENDIAN 1300 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN); 1301 #else 1302 bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN); 1303 #endif 1304 1305 pr2("Created type: INTEGER %d-bit %ssigned %s\n", 1306 size, sign ? "un" : "", hex ? "hex" : ""); 1307 return type; 1308 1309 err: 1310 bt_ctf_field_type_put(type); 1311 return NULL; 1312 } 1313 1314 static void ctf_writer__cleanup_data(struct ctf_writer *cw) 1315 { 1316 unsigned int i; 1317 1318 for (i = 0; i < ARRAY_SIZE(cw->data.array); i++) 1319 bt_ctf_field_type_put(cw->data.array[i]); 1320 } 1321 1322 static int ctf_writer__init_data(struct ctf_writer *cw) 1323 { 1324 #define CREATE_INT_TYPE(type, size, sign, hex) \ 1325 do { \ 1326 (type) = create_int_type(size, sign, hex); \ 1327 if (!(type)) \ 1328 goto err; \ 1329 } while (0) 1330 1331 CREATE_INT_TYPE(cw->data.s64, 64, true, false); 1332 CREATE_INT_TYPE(cw->data.u64, 64, false, false); 1333 CREATE_INT_TYPE(cw->data.s32, 32, true, false); 1334 CREATE_INT_TYPE(cw->data.u32, 32, false, false); 1335 CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true); 1336 CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true); 1337 1338 cw->data.string = bt_ctf_field_type_string_create(); 1339 if (cw->data.string) 1340 return 0; 1341 1342 err: 1343 ctf_writer__cleanup_data(cw); 1344 pr_err("Failed to create data types.\n"); 1345 return -1; 1346 } 1347 1348 static void ctf_writer__cleanup(struct ctf_writer *cw) 1349 { 1350 ctf_writer__cleanup_data(cw); 1351 1352 bt_ctf_clock_put(cw->clock); 1353 free_streams(cw); 1354 bt_ctf_stream_class_put(cw->stream_class); 1355 bt_ctf_writer_put(cw->writer); 1356 1357 /* and NULL all the pointers */ 1358 memset(cw, 0, sizeof(*cw)); 1359 } 1360 1361 static int ctf_writer__init(struct ctf_writer *cw, const char *path) 1362 { 1363 struct bt_ctf_writer *writer; 1364 struct bt_ctf_stream_class *stream_class; 1365 struct bt_ctf_clock *clock; 1366 struct bt_ctf_field_type *pkt_ctx_type; 1367 int ret; 1368 1369 /* CTF writer */ 1370 writer = bt_ctf_writer_create(path); 1371 if (!writer) 1372 goto err; 1373 1374 cw->writer = writer; 1375 1376 /* CTF clock */ 1377 clock = bt_ctf_clock_create("perf_clock"); 1378 if (!clock) { 1379 pr("Failed to create CTF clock.\n"); 1380 goto err_cleanup; 1381 } 1382 1383 cw->clock = clock; 1384 1385 if (ctf_writer__setup_clock(cw)) { 1386 pr("Failed to setup CTF clock.\n"); 1387 goto err_cleanup; 1388 } 1389 1390 /* CTF stream class */ 1391 stream_class = bt_ctf_stream_class_create("perf_stream"); 1392 if (!stream_class) { 1393 pr("Failed to create CTF stream class.\n"); 1394 goto err_cleanup; 1395 } 1396 1397 cw->stream_class = stream_class; 1398 1399 /* CTF clock stream setup */ 1400 if (bt_ctf_stream_class_set_clock(stream_class, clock)) { 1401 pr("Failed to assign CTF clock to stream class.\n"); 1402 goto err_cleanup; 1403 } 1404 1405 if (ctf_writer__init_data(cw)) 1406 goto err_cleanup; 1407 1408 /* Add cpu_id for packet context */ 1409 pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class); 1410 if (!pkt_ctx_type) 1411 goto err_cleanup; 1412 1413 ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id"); 1414 bt_ctf_field_type_put(pkt_ctx_type); 1415 if (ret) 1416 goto err_cleanup; 1417 1418 /* CTF clock writer setup */ 1419 if (bt_ctf_writer_add_clock(writer, clock)) { 1420 pr("Failed to assign CTF clock to writer.\n"); 1421 goto err_cleanup; 1422 } 1423 1424 return 0; 1425 1426 err_cleanup: 1427 ctf_writer__cleanup(cw); 1428 err: 1429 pr_err("Failed to setup CTF writer.\n"); 1430 return -1; 1431 } 1432 1433 static int ctf_writer__flush_streams(struct ctf_writer *cw) 1434 { 1435 int cpu, ret = 0; 1436 1437 for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++) 1438 ret = ctf_stream__flush(cw->stream[cpu]); 1439 1440 return ret; 1441 } 1442 1443 static int convert__config(const char *var, const char *value, void *cb) 1444 { 1445 struct convert *c = cb; 1446 1447 if (!strcmp(var, "convert.queue-size")) 1448 return perf_config_u64(&c->queue_size, var, value); 1449 1450 return 0; 1451 } 1452 1453 int bt_convert__perf2ctf(const char *input, const char *path, 1454 struct perf_data_convert_opts *opts) 1455 { 1456 struct perf_session *session; 1457 struct perf_data_file file = { 1458 .path = input, 1459 .mode = PERF_DATA_MODE_READ, 1460 .force = opts->force, 1461 }; 1462 struct convert c = { 1463 .tool = { 1464 .sample = process_sample_event, 1465 .mmap = perf_event__process_mmap, 1466 .mmap2 = perf_event__process_mmap2, 1467 .comm = perf_event__process_comm, 1468 .exit = perf_event__process_exit, 1469 .fork = perf_event__process_fork, 1470 .lost = perf_event__process_lost, 1471 .tracing_data = perf_event__process_tracing_data, 1472 .build_id = perf_event__process_build_id, 1473 .namespaces = perf_event__process_namespaces, 1474 .ordered_events = true, 1475 .ordering_requires_timestamps = true, 1476 }, 1477 }; 1478 struct ctf_writer *cw = &c.writer; 1479 int err; 1480 1481 if (opts->all) { 1482 c.tool.comm = process_comm_event; 1483 c.tool.exit = process_exit_event; 1484 c.tool.fork = process_fork_event; 1485 } 1486 1487 err = perf_config(convert__config, &c); 1488 if (err) 1489 return err; 1490 1491 /* CTF writer */ 1492 if (ctf_writer__init(cw, path)) 1493 return -1; 1494 1495 err = -1; 1496 /* perf.data session */ 1497 session = perf_session__new(&file, 0, &c.tool); 1498 if (!session) 1499 goto free_writer; 1500 1501 if (c.queue_size) { 1502 ordered_events__set_alloc_size(&session->ordered_events, 1503 c.queue_size); 1504 } 1505 1506 /* CTF writer env/clock setup */ 1507 if (ctf_writer__setup_env(cw, session)) 1508 goto free_session; 1509 1510 /* CTF events setup */ 1511 if (setup_events(cw, session)) 1512 goto free_session; 1513 1514 if (opts->all && setup_non_sample_events(cw, session)) 1515 goto free_session; 1516 1517 if (setup_streams(cw, session)) 1518 goto free_session; 1519 1520 err = perf_session__process_events(session); 1521 if (!err) 1522 err = ctf_writer__flush_streams(cw); 1523 else 1524 pr_err("Error during conversion.\n"); 1525 1526 fprintf(stderr, 1527 "[ perf data convert: Converted '%s' into CTF data '%s' ]\n", 1528 file.path, path); 1529 1530 fprintf(stderr, 1531 "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples", 1532 (double) c.events_size / 1024.0 / 1024.0, 1533 c.events_count); 1534 1535 if (!c.non_sample_count) 1536 fprintf(stderr, ") ]\n"); 1537 else 1538 fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count); 1539 1540 cleanup_events(session); 1541 perf_session__delete(session); 1542 ctf_writer__cleanup(cw); 1543 1544 return err; 1545 1546 free_session: 1547 perf_session__delete(session); 1548 free_writer: 1549 ctf_writer__cleanup(cw); 1550 pr_err("Error during conversion setup.\n"); 1551 return err; 1552 } 1553