// SPDX-License-Identifier: GPL-2.0-only
/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <errno.h>
#include <inttypes.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"
#include <linux/ctype.h>

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS	4096

struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			 cpu;
	u32			 count;
};

struct ctf_writer {
	/* writer primitives */
	struct bt_ctf_writer		 *writer;
	struct ctf_stream		**stream;
	int				  stream_cnt;
	struct bt_ctf_stream_class	 *stream_class;
	struct bt_ctf_clock		 *clock;

	/* data types */
	union {
		struct {
			struct bt_ctf_field_type	*s64;
			struct bt_ctf_field_type	*u64;
			struct bt_ctf_field_type	*s32;
			struct bt_ctf_field_type	*u32;
			struct bt_ctf_field_type	*string;
			struct bt_ctf_field_type	*u32_hex;
			struct bt_ctf_field_type	*u64_hex;
		};
		/* one slot for each named type above, used for cleanup */
		struct bt_ctf_field_type *array[7];
	} data;
	struct bt_ctf_event_class	*comm_class;
	struct bt_ctf_event_class	*exit_class;
	struct bt_ctf_event_class	*fork_class;
	struct bt_ctf_event_class	*mmap_class;
	struct bt_ctf_event_class	*mmap2_class;
};

struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;
	u64			events_count;
	u64			non_sample_count;

	/* Ordered events configured queue size. */
	u64			queue_size;
};

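/*
 * Create a field of the given CTF integer type, set it to 'val' (as a
 * signed or unsigned value, matching the type) and attach it to 'event'
 * as the payload member called 'name'.
 */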
static int value_set(struct bt_ctf_field_type *type,
		     struct bt_ctf_event *event,
		     const char *name, u64 val)
{
	struct bt_ctf_field *field;
	bool sign = bt_ctf_field_type_integer_get_signed(type);
	int ret;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	if (sign) {
		ret = bt_ctf_field_signed_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	} else {
		ret = bt_ctf_field_unsigned_integer_set_value(field, val);
		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err;
		}
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret) {
		pr_err("failed to set payload %s\n", name);
		goto err;
	}

	pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
	bt_ctf_field_put(field);
	return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
					    struct bt_ctf_event *event,	\
					    const char *name,		\
					    _val_type val)		\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static int string_set_value(struct bt_ctf_field *field, const char *string);
static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
		 const char *name, const char *string)
{
	struct bt_ctf_field_type *type = cw->data.string;
	struct bt_ctf_field *field;
	int ret = 0;

	field = bt_ctf_field_create(type);
	if (!field) {
		pr_err("failed to create a field %s\n", name);
		return -1;
	}

	ret = string_set_value(field, string);
	if (ret) {
		pr_err("failed to set value %s\n", name);
		goto err_put_field;
	}

	ret = bt_ctf_event_set_payload(event, name, field);
	if (ret)
		pr_err("failed to set payload %s\n", name);

err_put_field:
	bt_ctf_field_put(field);
	return ret;
}

static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct tep_format_field *field)
{
	unsigned long flags = field->flags;

	if (flags & TEP_FIELD_IS_STRING)
		return cw->data.string;

	if (!(flags & TEP_FIELD_IS_SIGNED)) {
		/* unsigned long are mostly pointers */
		if (flags & TEP_FIELD_IS_LONG || flags & TEP_FIELD_IS_POINTER)
			return cw->data.u64_hex;
	}

	if (flags & TEP_FIELD_IS_SIGNED) {
		if (field->size == 8)
			return cw->data.s64;
		else
			return cw->data.s32;
	}

	if (field->size == 8)
		return cw->data.u64;
	else
		return cw->data.u32;
}

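/*
 * Sign-extend a value read from a tracepoint field of 'size' bytes into a
 * 64-bit signed integer, e.g. (illustrative values):
 *
 *   adjust_signedness(0xfe,   1) == 0xfffffffffffffffeULL  (-2 as an s8)
 *   adjust_signedness(0x7fff, 2) == 0x7fff                 (positive, unchanged)
 */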
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
	unsigned long long value_mask;

	/*
	 * value_mask = (1 << (size * 8 - 1)) - 1.
	 * Directly set value_mask for code readers.
	 */
	switch (size) {
	case 1:
		value_mask = 0x7fULL;
		break;
	case 2:
		value_mask = 0x7fffULL;
		break;
	case 4:
		value_mask = 0x7fffffffULL;
		break;
	case 8:
		/*
		 * For a 64 bit value, return it as is. There is no need
		 * to fill the high bits.
		 */
		/* Fall through */
	default:
		/* BUG! */
		return value_int;
	}

	/* If it is a positive value, don't adjust. */
	if ((value_int & (~0ULL - value_mask)) == 0)
		return value_int;

	/* Fill the upper part of value_int with 1s to make it a negative long long. */
	return (value_int & value_mask) | ~value_mask;
}

static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;
			buffer[p] = string[i];
		} else {
			char numstr[5];

			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			memcpy(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}

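/*
 * For TEP_FIELD_IS_DYNAMIC fields the value stored in the record is a
 * 32-bit descriptor: the low 16 bits are the payload offset and the high
 * 16 bits are the length, e.g. a (hypothetical) descriptor 0x001a0070
 * means 26 bytes of data at offset 0x70 within the raw sample.
 */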
static int add_tracepoint_field_value(struct ctf_writer *cw,
				      struct bt_ctf_event_class *event_class,
				      struct bt_ctf_event *event,
				      struct perf_sample *sample,
				      struct tep_format_field *fmtf)
{
	struct bt_ctf_field_type *type;
	struct bt_ctf_field *array_field;
	struct bt_ctf_field *field;
	const char *name = fmtf->name;
	void *data = sample->raw_data;
	unsigned long flags = fmtf->flags;
	unsigned int n_items;
	unsigned int i;
	unsigned int offset;
	unsigned int len;
	int ret;

	name = fmtf->alias;
	offset = fmtf->offset;
	len = fmtf->size;
	if (flags & TEP_FIELD_IS_STRING)
		flags &= ~TEP_FIELD_IS_ARRAY;

	if (flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long tmp_val;

		tmp_val = tep_read_number(fmtf->event->tep,
					  data + offset, len);
		offset = tmp_val;
		len = offset >> 16;
		offset &= 0xffff;
	}

	if (flags & TEP_FIELD_IS_ARRAY) {

		type = bt_ctf_event_class_get_field_by_name(
				event_class, name);
		array_field = bt_ctf_field_create(type);
		bt_ctf_field_type_put(type);
		if (!array_field) {
			pr_err("Failed to create array type %s\n", name);
			return -1;
		}

		len = fmtf->size / fmtf->arraylen;
		n_items = fmtf->arraylen;
	} else {
		n_items = 1;
		array_field = NULL;
	}

	type = get_tracepoint_field_type(cw, fmtf);

	for (i = 0; i < n_items; i++) {
		if (flags & TEP_FIELD_IS_ARRAY)
			field = bt_ctf_field_array_get_field(array_field, i);
		else
			field = bt_ctf_field_create(type);

		if (!field) {
			pr_err("failed to create a field %s\n", name);
			return -1;
		}

		if (flags & TEP_FIELD_IS_STRING)
			ret = string_set_value(field, data + offset + i * len);
		else {
			unsigned long long value_int;

			value_int = tep_read_number(
					fmtf->event->tep,
					data + offset + i * len, len);

			if (!(flags & TEP_FIELD_IS_SIGNED))
				ret = bt_ctf_field_unsigned_integer_set_value(
						field, value_int);
			else
				ret = bt_ctf_field_signed_integer_set_value(
						field, adjust_signedness(value_int, len));
		}

		if (ret) {
			pr_err("failed to set field value %s\n", name);
			goto err_put_field;
		}
		if (!(flags & TEP_FIELD_IS_ARRAY)) {
			ret = bt_ctf_event_set_payload(event, name, field);
			if (ret) {
				pr_err("failed to set payload %s\n", name);
				goto err_put_field;
			}
		}
		bt_ctf_field_put(field);
	}
	if (flags & TEP_FIELD_IS_ARRAY) {
		ret = bt_ctf_event_set_payload(event, name, array_field);
		if (ret) {
			pr_err("Failed to add payload array %s\n", name);
			return -1;
		}
		bt_ctf_field_put(array_field);
	}
	return 0;

err_put_field:
	bt_ctf_field_put(field);
	return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
					struct bt_ctf_event_class *event_class,
					struct bt_ctf_event *event,
					struct tep_format_field *fields,
					struct perf_sample *sample)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		ret = add_tracepoint_field_value(cw, event_class, event, sample,
						 field);
		if (ret)
			return -1;
	}
	return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
				 struct bt_ctf_event_class *event_class,
				 struct bt_ctf_event *event,
				 struct evsel *evsel,
				 struct perf_sample *sample)
{
	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
	struct tep_format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_values(cw, event_class, event,
					   common_fields, sample);
	if (!ret)
		ret = add_tracepoint_fields_values(cw, event_class, event,
						   fields, sample);

	return ret;
}

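/*
 * A BPF output event carries an opaque blob in sample->raw_data.  It is
 * exported as a 'raw_len' element count plus a 'raw_data' sequence of
 * 32-bit words (see add_bpf_output_types() below); trailing bytes that do
 * not make up a full u32 are dropped with a warning.
 */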
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
			   raw_size, raw_size - nr_elements * sizeof(u32));

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

static int
add_callchain_output_values(struct bt_ctf_event_class *event_class,
			    struct bt_ctf_event *event,
			    struct ip_callchain *callchain)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int nr_elements = callchain->nr;
	unsigned int i;
	int ret;

	len_type = bt_ctf_event_class_get_field_by_name(
			event_class, "perf_callchain_size");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for perf_callchain_size\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
	if (ret) {
		pr_err("failed to set payload to perf_callchain_size\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(
			event_class, "perf_callchain");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'perf_callchain' for callchain output event\n");
		ret = -1;
		goto put_seq_type;
	}

	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'perf_callchain'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u64 *)(callchain->ips))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set callchain[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
	if (ret)
		pr_err("failed to set payload for perf_callchain\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}

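/*
 * Fill in the generic perf_* payload fields (perf_ip, perf_tid, perf_id,
 * perf_period, ...) that add_generic_types() declared for this event,
 * based on which PERF_SAMPLE_* bits are set in the evsel's sample_type.
 */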
static int add_generic_values(struct ctf_writer *cw,
			      struct bt_ctf_event *event,
			      struct evsel *evsel,
			      struct perf_sample *sample)
{
	u64 type = evsel->core.attr.sample_type;
	int ret;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

	if (type & PERF_SAMPLE_IP) {
		ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TID) {
		ret = value_set_s32(cw, event, "perf_tid", sample->tid);
		if (ret)
			return -1;

		ret = value_set_s32(cw, event, "perf_pid", sample->pid);
		if (ret)
			return -1;
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER)) {
		ret = value_set_u64(cw, event, "perf_id", sample->id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		ret = value_set_u64(cw, event, "perf_period", sample->period);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		ret = value_set_u64(cw, event, "perf_weight", sample->weight);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		ret = value_set_u64(cw, event, "perf_data_src",
				    sample->data_src);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		ret = value_set_u64(cw, event, "perf_transaction",
				    sample->transaction);
		if (ret)
			return -1;
	}

	return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
	int err = 0;

	if (cs) {
		err = bt_ctf_stream_flush(cs->stream);
		if (err)
			pr_err("CTF stream %d flush failed\n", cs->cpu);

		pr("Flush stream for cpu %d (%u samples)\n",
		   cs->cpu, cs->count);

		cs->count = 0;
	}

	return err;
}

static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
	if (cs) {
		bt_ctf_stream_put(cs->stream);
		free(cs);
	}
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs = cw->stream[cpu];

	if (!cs) {
		cs = ctf_stream__create(cw, cpu);
		cw->stream[cpu] = cs;
	}

	return cs;
}

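/*
 * Samples are written to one CTF stream per CPU.  If the sample carries no
 * CPU number, or the CPU is outside the range of allocated streams, it is
 * accounted to CPU 0.
 */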
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
			  struct evsel *evsel)
{
	int cpu = 0;

	if (evsel->core.attr.sample_type & PERF_SAMPLE_CPU)
		cpu = sample->cpu;

	if (cpu >= cw->stream_cnt) {
		pr_err("Event was recorded for CPU %d, limit is at %d.\n",
		       cpu, cw->stream_cnt);
		cpu = 0;
	}

	return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the time for the stream
 * flush than to keep track of the number of events and check it against
 * the threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *_event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine __maybe_unused)
{
	struct convert *c = container_of(tool, struct convert, tool);
	struct evsel_priv *priv = evsel->priv;
	struct ctf_writer *cw = &c->writer;
	struct ctf_stream *cs;
	struct bt_ctf_event_class *event_class;
	struct bt_ctf_event *event;
	int ret;
	unsigned long type = evsel->core.attr.sample_type;

	if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
		return 0;

	event_class = priv->event_class;

	/* update stats */
	c->events_count++;
	c->events_size += _event->header.size;

	pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

	event = bt_ctf_event_create(event_class);
	if (!event) {
		pr_err("Failed to create a CTF event\n");
		return -1;
	}

	bt_ctf_clock_set_time(cw->clock, sample->time);

	ret = add_generic_values(cw, event, evsel, sample);
	if (ret)
		return -1;

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_values(cw, event_class, event,
					    evsel, sample);
		if (ret)
			return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		ret = add_callchain_output_values(event_class,
				event, sample->callchain);
		if (ret)
			return -1;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_values(event_class, event, sample);
		if (ret)
			return -1;
	}

	cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
	if (cs) {
		if (is_flush_needed(cs))
			ctf_stream__flush(cs);

		cs->count++;
		bt_ctf_stream_append_event(cs->stream, event);
	}

	bt_ctf_event_put(event);
	return cs ? 0 : -1;
}

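/*
 * Non-sample events (comm, fork, exit, mmap, mmap2) are mirrored into the
 * CTF trace when opts->all is set.  The macros below generate one
 * process_*_event() handler per event type; each handler fills the
 * corresponding perf_* event class and then chains to the default
 * perf_event__process_*() handler.
 */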
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)				\
	do {									\
		ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
		if (ret)							\
			return -1;						\
	} while (0)

#define __FUNC_PROCESS_NON_SAMPLE(_name, body)					\
static int process_##_name##_event(struct perf_tool *tool,			\
				   union perf_event *_event,			\
				   struct perf_sample *sample,			\
				   struct machine *machine)			\
{										\
	struct convert *c = container_of(tool, struct convert, tool);		\
	struct ctf_writer *cw = &c->writer;					\
	struct bt_ctf_event_class *event_class = cw->_name##_class;		\
	struct bt_ctf_event *event;						\
	struct ctf_stream *cs;							\
	int ret;								\
										\
	c->non_sample_count++;							\
	c->events_size += _event->header.size;					\
	event = bt_ctf_event_create(event_class);				\
	if (!event) {								\
		pr_err("Failed to create a CTF event\n");			\
		return -1;							\
	}									\
										\
	bt_ctf_clock_set_time(cw->clock, sample->time);				\
	body									\
	cs = ctf_stream(cw, 0);							\
	if (cs) {								\
		if (is_flush_needed(cs))					\
			ctf_stream__flush(cs);					\
										\
		cs->count++;							\
		bt_ctf_stream_append_event(cs->stream, event);			\
	}									\
	bt_ctf_event_put(event);						\
										\
	return perf_event__process_##_name(tool, _event, sample, machine);	\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

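/*
 * change_name() below resolves identifier clashes, e.g. (hypothetical
 * field names):
 *
 *   change_name(NULL, "pid", -1) -> "_pid"        (reserved CTF identifier)
 *   change_name(NULL, "pid",  1) -> "pid_dupl_1"  (duplicate field name)
 */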
/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
	char *new_name = NULL;
	size_t len;

	if (!name)
		name = orig_name;

	if (dup >= 10)
		goto out;
	/*
	 * Add '_' prefix to potential keyword. According to
	 * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
	 * further CTF spec updates may require us to use '$'.
	 */
	if (dup < 0)
		len = strlen(name) + sizeof("_");
	else
		len = strlen(orig_name) + sizeof("_dupl_X");

	new_name = malloc(len);
	if (!new_name)
		goto out;

	if (dup < 0)
		snprintf(new_name, len, "_%s", name);
	else
		snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
	if (name != orig_name)
		free(name);
	return new_name;
}

static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct tep_format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
				(char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.\n");
		return -1;
	}
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
				       struct tep_format_field *fields,
				       struct bt_ctf_event_class *event_class)
{
	struct tep_format_field *field;
	int ret;

	for (field = fields; field; field = field->next) {
		struct bt_ctf_field_type *type;
		unsigned long flags = field->flags;

		pr2("  field '%s'\n", field->name);

		type = get_tracepoint_field_type(cw, field);
		if (!type)
			return -1;

		/*
		 * A string is an array of chars. For this we use the string
		 * type and don't care that it is an array. What we don't
		 * support is an array of strings.
		 */
		if (flags & TEP_FIELD_IS_STRING)
			flags &= ~TEP_FIELD_IS_ARRAY;

		if (flags & TEP_FIELD_IS_ARRAY)
			type = bt_ctf_field_type_array_create(type, field->arraylen);

		ret = event_class_add_field(event_class, type, field);

		if (flags & TEP_FIELD_IS_ARRAY)
			bt_ctf_field_type_put(type);

		if (ret) {
			pr_err("Failed to add field '%s': %d\n",
			       field->name, ret);
			return -1;
		}
	}

	return 0;
}

static int add_tracepoint_types(struct ctf_writer *cw,
				struct evsel *evsel,
				struct bt_ctf_event_class *class)
{
	struct tep_format_field *common_fields = evsel->tp_format->format.common_fields;
	struct tep_format_field *fields        = evsel->tp_format->format.fields;
	int ret;

	ret = add_tracepoint_fields_types(cw, common_fields, class);
	if (!ret)
		ret = add_tracepoint_fields_types(cw, fields, class);

	return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
				struct bt_ctf_event_class *class)
{
	struct bt_ctf_field_type *len_type = cw->data.u32;
	struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
	struct bt_ctf_field_type *seq_type;
	int ret;

	ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
	if (ret)
		return ret;

	seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
	if (!seq_type)
		return -1;

	return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->core.attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s'\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

	if (type & PERF_SAMPLE_CALLCHAIN) {
		ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
		ADD_FIELD(event_class,
			  bt_ctf_field_type_sequence_create(
				  cw->data.u64_hex, "perf_callchain_size"),
			  "perf_callchain");
	}

#undef ADD_FIELD
	return 0;
}

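/*
 * Create one CTF event class per perf evsel: generic perf_* fields first,
 * then tracepoint format fields and/or the BPF output fields, and stash
 * the class in evsel->priv so process_sample_event() can find it.
 */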
static int add_event(struct ctf_writer *cw, struct evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->core.attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	int ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = add_event(cw, evsel);
		if (ret)
			return ret;
	}
	return 0;
}

#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {									\
		pr2("  field '%s'\n", #n);					\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s'\n", #n);		\
			return -1;						\
		}								\
	} while (0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)				\
static int add_##_name##_event(struct ctf_writer *cw)				\
{										\
	struct bt_ctf_event_class *event_class;					\
	int ret;								\
										\
	pr("Adding "#_name" event\n");						\
	event_class = bt_ctf_event_class_create("perf_" #_name);		\
	if (!event_class)							\
		return -1;							\
	body									\
										\
	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
	if (ret) {								\
		pr("Failed to add event class '"#_name"' into stream.\n");	\
		return ret;							\
	}									\
										\
	cw->_name##_class = event_class;					\
	bt_ctf_event_class_put(event_class);					\
	return 0;								\
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

static int setup_non_sample_events(struct ctf_writer *cw,
				   struct perf_session *session __maybe_unused)
{
	int ret;

	ret = add_comm_event(cw);
	if (ret)
		return ret;
	ret = add_exit_event(cw);
	if (ret)
		return ret;
	ret = add_fork_event(cw);
	if (ret)
		return ret;
	ret = add_mmap_event(cw);
	if (ret)
		return ret;
	ret = add_mmap2_event(cw);
	if (ret)
		return ret;
	return 0;
}

static void cleanup_events(struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		struct evsel_priv *priv;

		priv = evsel->priv;
		bt_ctf_event_class_put(priv->event_class);
		zfree(&evsel->priv);
	}

	evlist__delete(evlist);
	session->evlist = NULL;
}

static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
	struct ctf_stream **stream;
	struct perf_header *ph = &session->header;
	int ncpus;

	/*
	 * Try to get the number of cpus used in the data file;
	 * if not present, fall back to MAX_CPUS.
	 */
	ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

	stream = zalloc(sizeof(*stream) * ncpus);
	if (!stream) {
		pr_err("Failed to allocate streams.\n");
		return -ENOMEM;
	}

	cw->stream     = stream;
	cw->stream_cnt = ncpus;
	return 0;
}

static void free_streams(struct ctf_writer *cw)
{
	int cpu;

	for (cpu = 0; cpu < cw->stream_cnt; cpu++)
		ctf_stream__delete(cw->stream[cpu]);

	zfree(&cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
	do {								\
		if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
			return -1;					\
	} while (0)

	ADD("host",        header->env.hostname);
	ADD("sysname",     "Linux");
	ADD("release",     header->env.os_release);
	ADD("version",     header->env.version);
	ADD("machine",     header->env.arch);
	ADD("domain",      "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}

static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
	struct bt_ctf_clock *clock = cw->clock;

	bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)					\
	do {						\
		if (bt_ctf_clock_set_##__n(clock, __v))	\
			return -1;			\
	} while (0)

	SET(frequency,   1000000000);
	SET(offset_s,    0);
	SET(offset,      0);
	SET(precision,   10);
	SET(is_absolute, 0);

#undef SET
	return 0;
}

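/*
 * Integer field types are created once in ctf_writer__init_data() and
 * shared by all event classes: size in bits, optional signedness, optional
 * hexadecimal display base, byte order matching the host.
 */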
static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
	struct bt_ctf_field_type *type;

	type = bt_ctf_field_type_integer_create(size);
	if (!type)
		return NULL;

	if (sign &&
	    bt_ctf_field_type_integer_set_signed(type, 1))
		goto err;

	if (hex &&
	    bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
		goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
	bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

	pr2("Created type: INTEGER %d-bit %ssigned %s\n",
	    size, sign ? "" : "un", hex ? "hex" : "");
	return type;

err:
	bt_ctf_field_type_put(type);
	return NULL;
}

static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
		bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)			\
	do {							\
		(type) = create_int_type(size, sign, hex);	\
		if (!(type))					\
			goto err;				\
	} while (0)

	CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
	CREATE_INT_TYPE(cw->data.u64, 64, false, false);
	CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
	CREATE_INT_TYPE(cw->data.u32, 32, false, false);
	CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
	CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

	cw->data.string = bt_ctf_field_type_string_create();
	if (cw->data.string)
		return 0;

err:
	ctf_writer__cleanup_data(cw);
	pr_err("Failed to create data types.\n");
	return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}

static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer		*writer;
	struct bt_ctf_stream_class	*stream_class;
	struct bt_ctf_clock		*clock;
	struct bt_ctf_field_type	*pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
	int cpu, ret = 0;

	for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
		ret = ctf_stream__flush(cw->stream[cpu]);

	return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
	struct convert *c = cb;

	if (!strcmp(var, "convert.queue-size"))
		return perf_config_u64(&c->queue_size, var, value);

	return 0;
}

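/*
 * Entry point for 'perf data convert --to-ctf': set up the CTF writer and
 * the perf session, register event classes and per-CPU streams, replay the
 * perf.data events through the tool callbacks above, then flush the
 * streams and report how much was written.
 */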
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.path  = input,
		.mode  = PERF_DATA_MODE_READ,
		.force = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample		= process_sample_event,
			.mmap		= perf_event__process_mmap,
			.mmap2		= perf_event__process_mmap2,
			.comm		= perf_event__process_comm,
			.exit		= perf_event__process_exit,
			.fork		= perf_event__process_fork,
			.lost		= perf_event__process_lost,
			.tracing_data	= perf_event__process_tracing_data,
			.build_id	= perf_event__process_build_id,
			.namespaces	= perf_event__process_namespaces,
			.ordered_events	= true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	if (opts->all) {
		c.tool.comm  = process_comm_event;
		c.tool.exit  = process_exit_event;
		c.tool.fork  = process_fork_event;
		c.tool.mmap  = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, 0, &c.tool);
	if (!session)
		goto free_writer;

	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}