// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_synth - synthetic trace events
 *
 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/rculist.h>
#include <linux/tracefs.h>

/* for gfp flag names */
#include <linux/trace_events.h>
#include <trace/events/mmflags.h>

#include "trace_synth.h"

#undef ERRORS
#define ERRORS	\
	C(BAD_NAME,		"Illegal name"),		\
	C(CMD_INCOMPLETE,	"Incomplete command"),		\
	C(EVENT_EXISTS,		"Event already exists"),	\
	C(TOO_MANY_FIELDS,	"Too many fields"),		\
	C(INCOMPLETE_TYPE,	"Incomplete type"),		\
	C(INVALID_TYPE,		"Invalid type"),		\
	C(INVALID_FIELD,	"Invalid field"),		\
	C(CMD_TOO_LONG,		"Command too long"),

#undef C
#define C(a, b)		SYNTH_ERR_##a

enum { ERRORS };

#undef C
#define C(a, b)		b

static const char *err_text[] = { ERRORS };

static char last_cmd[MAX_FILTER_STR_VAL];

static int errpos(const char *str)
{
	return err_pos(last_cmd, str);
}

static void last_cmd_set(char *str)
{
	if (!str)
		return;

	strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1);
}

static void synth_err(u8 err_type, u8 err_pos)
{
	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
			err_type, err_pos);
}

static int create_synth_event(int argc, const char **argv);
static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
static int synth_event_release(struct dyn_event *ev);
static bool synth_event_is_busy(struct dyn_event *ev);
static bool synth_event_match(const char *system, const char *event,
			      int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations synth_event_ops = {
	.create = create_synth_event,
	.show = synth_event_show,
	.is_busy = synth_event_is_busy,
	.free = synth_event_release,
	.match = synth_event_match,
};

static bool is_synth_event(struct dyn_event *ev)
{
	return ev->ops == &synth_event_ops;
}

static struct synth_event *to_synth_event(struct dyn_event *ev)
{
	return container_of(ev, struct synth_event, devent);
}

static bool synth_event_is_busy(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	return event->ref != 0;
}

static bool synth_event_match(const char *system, const char *event,
			      int argc, const char **argv, struct dyn_event *ev)
{
	struct synth_event *sev = to_synth_event(ev);

	return strcmp(sev->name, event) == 0 &&
		(!system || strcmp(system, SYNTH_SYSTEM) == 0);
}

struct synth_trace_event {
	struct trace_entry	ent;
	u64			fields[];
};

static int synth_event_define_fields(struct trace_event_call *call)
{
	struct synth_trace_event trace;
	int offset = offsetof(typeof(trace), fields);
	struct synth_event *event = call->data;
	unsigned int i, size, n_u64;
	char *name, *type;
	bool is_signed;
	int ret = 0;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		size = event->fields[i]->size;
		is_signed = event->fields[i]->is_signed;
		type = event->fields[i]->type;
		name = event->fields[i]->name;
		ret = trace_define_field(call, type, name, offset, size,
					 is_signed, FILTER_OTHER);
		if (ret)
			break;

		event->fields[i]->offset = n_u64;

		if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
			offset += STR_VAR_LEN_MAX;
			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
		} else {
			offset += sizeof(u64);
			n_u64++;
		}
	}

	event->n_u64 = n_u64;

	return ret;
}

static bool synth_field_signed(char *type)
{
	if (str_has_prefix(type, "u"))
		return false;
	if (strcmp(type, "gfp_t") == 0)
		return false;

	return true;
}

static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}

static int synth_field_string_size(char *type)
{
	char buf[4], *end, *start;
	unsigned int len;
	int size, err;

	start = strstr(type, "char[");
	if (start == NULL)
		return -EINVAL;
	start += sizeof("char[") - 1;

	end = strchr(type, ']');
	if (!end || end < start || type + strlen(type) > end + 1)
		return -EINVAL;

	len = end - start;
	if (len > 3)
		return -EINVAL;

	if (len == 0)
		return 0; /* variable-length string */

	strncpy(buf, start, len);
	buf[len] = '\0';

	err = kstrtouint(buf, 0, &size);
	if (err)
		return err;

	if (size > STR_VAR_LEN_MAX)
		return -EINVAL;

	return size;
}

static int synth_field_size(char *type)
{
	int size = 0;

	if (strcmp(type, "s64") == 0)
		size = sizeof(s64);
	else if (strcmp(type, "u64") == 0)
		size = sizeof(u64);
	else if (strcmp(type, "s32") == 0)
		size = sizeof(s32);
	else if (strcmp(type, "u32") == 0)
		size = sizeof(u32);
	else if (strcmp(type, "s16") == 0)
		size = sizeof(s16);
	else if (strcmp(type, "u16") == 0)
		size = sizeof(u16);
	else if (strcmp(type, "s8") == 0)
		size = sizeof(s8);
	else if (strcmp(type, "u8") == 0)
		size = sizeof(u8);
	else if (strcmp(type, "char") == 0)
		size = sizeof(char);
	else if (strcmp(type, "unsigned char") == 0)
		size = sizeof(unsigned char);
	else if (strcmp(type, "int") == 0)
		size = sizeof(int);
	else if (strcmp(type, "unsigned int") == 0)
		size = sizeof(unsigned int);
	else if (strcmp(type, "long") == 0)
		size = sizeof(long);
	else if (strcmp(type, "unsigned long") == 0)
		size = sizeof(unsigned long);
	else if (strcmp(type, "bool") == 0)
		size = sizeof(bool);
	else if (strcmp(type, "pid_t") == 0)
		size = sizeof(pid_t);
	else if (strcmp(type, "gfp_t") == 0)
		size = sizeof(gfp_t);
	else if (synth_field_is_string(type))
		size = synth_field_string_size(type);

	return size;
}
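
/*
 * A few worked examples of the type strings handled above (illustrative
 * only; the values follow directly from synth_field_size() and
 * synth_field_string_size()):
 *
 *	"u64"       -> 8
 *	"pid_t"     -> sizeof(pid_t)
 *	"char[16]"  -> 16       (fixed-size string field)
 *	"char[]"    -> 0        (variable-length string, sized at trace time)
 *	"char[999]" -> -EINVAL  (larger than STR_VAR_LEN_MAX)
 *
 * A size of 0 is not itself an error; parse_synth_field() below turns
 * such a field into a __data_loc dynamic string.
 */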

static const char *synth_field_fmt(char *type)
{
	const char *fmt = "%llu";

	if (strcmp(type, "s64") == 0)
		fmt = "%lld";
	else if (strcmp(type, "u64") == 0)
		fmt = "%llu";
	else if (strcmp(type, "s32") == 0)
		fmt = "%d";
	else if (strcmp(type, "u32") == 0)
		fmt = "%u";
	else if (strcmp(type, "s16") == 0)
		fmt = "%d";
	else if (strcmp(type, "u16") == 0)
		fmt = "%u";
	else if (strcmp(type, "s8") == 0)
		fmt = "%d";
	else if (strcmp(type, "u8") == 0)
		fmt = "%u";
	else if (strcmp(type, "char") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned char") == 0)
		fmt = "%u";
	else if (strcmp(type, "int") == 0)
		fmt = "%d";
	else if (strcmp(type, "unsigned int") == 0)
		fmt = "%u";
	else if (strcmp(type, "long") == 0)
		fmt = "%ld";
	else if (strcmp(type, "unsigned long") == 0)
		fmt = "%lu";
	else if (strcmp(type, "bool") == 0)
		fmt = "%d";
	else if (strcmp(type, "pid_t") == 0)
		fmt = "%d";
	else if (strcmp(type, "gfp_t") == 0)
		fmt = "%x";
	else if (synth_field_is_string(type))
		fmt = "%.*s";

	return fmt;
}

static void print_synth_event_num_val(struct trace_seq *s,
				      char *print_fmt, char *name,
				      int size, u64 val, char *space)
{
	switch (size) {
	case 1:
		trace_seq_printf(s, print_fmt, name, (u8)val, space);
		break;

	case 2:
		trace_seq_printf(s, print_fmt, name, (u16)val, space);
		break;

	case 4:
		trace_seq_printf(s, print_fmt, name, (u32)val, space);
		break;

	default:
		trace_seq_printf(s, print_fmt, name, val, space);
		break;
	}
}

static enum print_line_t print_synth_event(struct trace_iterator *iter,
					   int flags,
					   struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct synth_trace_event *entry;
	struct synth_event *se;
	unsigned int i, n_u64;
	char print_fmt[32];
	const char *fmt;

	entry = (struct synth_trace_event *)iter->ent;
	se = container_of(event, struct synth_event, call.event);

	trace_seq_printf(s, "%s: ", se->name);

	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
		if (trace_seq_has_overflowed(s))
			goto end;

		fmt = synth_field_fmt(se->fields[i]->type);

		/* parameter types */
		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", fmt);

		snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);

		/* parameter values */
		if (se->fields[i]->is_string) {
			if (se->fields[i]->is_dynamic) {
				u32 offset, data_offset;
				char *str_field;

				offset = (u32)entry->fields[n_u64];
				data_offset = offset & 0xffff;

				str_field = (char *)entry + data_offset;

				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 str_field,
						 i == se->n_fields - 1 ? "" : " ");
				n_u64++;
			} else {
				trace_seq_printf(s, print_fmt, se->fields[i]->name,
						 STR_VAR_LEN_MAX,
						 (char *)&entry->fields[n_u64],
						 i == se->n_fields - 1 ? "" : " ");
				n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
			}
		} else {
			struct trace_print_flags __flags[] = {
				__def_gfpflag_names, {-1, NULL} };
			char *space = (i == se->n_fields - 1 ? "" : " ");
"" : " "); 367 368 print_synth_event_num_val(s, print_fmt, 369 se->fields[i]->name, 370 se->fields[i]->size, 371 entry->fields[n_u64], 372 space); 373 374 if (strcmp(se->fields[i]->type, "gfp_t") == 0) { 375 trace_seq_puts(s, " ("); 376 trace_print_flags_seq(s, "|", 377 entry->fields[n_u64], 378 __flags); 379 trace_seq_putc(s, ')'); 380 } 381 n_u64++; 382 } 383 } 384 end: 385 trace_seq_putc(s, '\n'); 386 387 return trace_handle_return(s); 388 } 389 390 static struct trace_event_functions synth_event_funcs = { 391 .trace = print_synth_event 392 }; 393 394 static unsigned int trace_string(struct synth_trace_event *entry, 395 struct synth_event *event, 396 char *str_val, 397 bool is_dynamic, 398 unsigned int data_size, 399 unsigned int *n_u64) 400 { 401 unsigned int len = 0; 402 char *str_field; 403 404 if (is_dynamic) { 405 u32 data_offset; 406 407 data_offset = offsetof(typeof(*entry), fields); 408 data_offset += event->n_u64 * sizeof(u64); 409 data_offset += data_size; 410 411 str_field = (char *)entry + data_offset; 412 413 len = strlen(str_val) + 1; 414 strscpy(str_field, str_val, len); 415 416 data_offset |= len << 16; 417 *(u32 *)&entry->fields[*n_u64] = data_offset; 418 419 (*n_u64)++; 420 } else { 421 str_field = (char *)&entry->fields[*n_u64]; 422 423 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 424 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64); 425 } 426 427 return len; 428 } 429 430 static notrace void trace_event_raw_event_synth(void *__data, 431 u64 *var_ref_vals, 432 unsigned int *var_ref_idx) 433 { 434 unsigned int i, n_u64, val_idx, len, data_size = 0; 435 struct trace_event_file *trace_file = __data; 436 struct synth_trace_event *entry; 437 struct trace_event_buffer fbuffer; 438 struct trace_buffer *buffer; 439 struct synth_event *event; 440 int fields_size = 0; 441 442 event = trace_file->event_call->data; 443 444 if (trace_trigger_soft_disabled(trace_file)) 445 return; 446 447 fields_size = event->n_u64 * sizeof(u64); 448 449 for (i = 0; i < event->n_dynamic_fields; i++) { 450 unsigned int field_pos = event->dynamic_fields[i]->field_pos; 451 char *str_val; 452 453 val_idx = var_ref_idx[field_pos]; 454 str_val = (char *)(long)var_ref_vals[val_idx]; 455 456 len = strlen(str_val) + 1; 457 458 fields_size += len; 459 } 460 461 /* 462 * Avoid ring buffer recursion detection, as this event 463 * is being performed within another event. 

static notrace void trace_event_raw_event_synth(void *__data,
						u64 *var_ref_vals,
						unsigned int *var_ref_idx)
{
	unsigned int i, n_u64, val_idx, len, data_size = 0;
	struct trace_event_file *trace_file = __data;
	struct synth_trace_event *entry;
	struct trace_event_buffer fbuffer;
	struct trace_buffer *buffer;
	struct synth_event *event;
	int fields_size = 0;

	event = trace_file->event_call->data;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	fields_size = event->n_u64 * sizeof(u64);

	for (i = 0; i < event->n_dynamic_fields; i++) {
		unsigned int field_pos = event->dynamic_fields[i]->field_pos;
		char *str_val;

		val_idx = var_ref_idx[field_pos];
		str_val = (char *)(long)var_ref_vals[val_idx];

		len = strlen(str_val) + 1;

		fields_size += len;
	}

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	buffer = trace_file->tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);

	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
					   sizeof(*entry) + fields_size);
	if (!entry)
		goto out;

	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
		val_idx = var_ref_idx[i];
		if (event->fields[i]->is_string) {
			char *str_val = (char *)(long)var_ref_vals[val_idx];

			len = trace_string(entry, event, str_val,
					   event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = event->fields[i];
			u64 val = var_ref_vals[val_idx];

			switch (field->size) {
			case 1:
				*(u8 *)&entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&entry->fields[n_u64] = (u32)val;
				break;

			default:
				entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}

	trace_event_buffer_commit(&fbuffer);
 out:
	ring_buffer_nest_end(buffer);
}

static void free_synth_event_print_fmt(struct trace_event_call *call)
{
	if (call) {
		kfree(call->print_fmt);
		call->print_fmt = NULL;
	}
}

static int __set_synth_event_print_fmt(struct synth_event *event,
				       char *buf, int len)
{
	const char *fmt;
	int pos = 0;
	int i;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < event->n_fields; i++) {
		fmt = synth_field_fmt(event->fields[i]->type);
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
				event->fields[i]->name, fmt,
				i == event->n_fields - 1 ? "" : ", ");
"" : ", "); 536 } 537 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 538 539 for (i = 0; i < event->n_fields; i++) { 540 if (event->fields[i]->is_string && 541 event->fields[i]->is_dynamic) 542 pos += snprintf(buf + pos, LEN_OR_ZERO, 543 ", __get_str(%s)", event->fields[i]->name); 544 else 545 pos += snprintf(buf + pos, LEN_OR_ZERO, 546 ", REC->%s", event->fields[i]->name); 547 } 548 549 #undef LEN_OR_ZERO 550 551 /* return the length of print_fmt */ 552 return pos; 553 } 554 555 static int set_synth_event_print_fmt(struct trace_event_call *call) 556 { 557 struct synth_event *event = call->data; 558 char *print_fmt; 559 int len; 560 561 /* First: called with 0 length to calculate the needed length */ 562 len = __set_synth_event_print_fmt(event, NULL, 0); 563 564 print_fmt = kmalloc(len + 1, GFP_KERNEL); 565 if (!print_fmt) 566 return -ENOMEM; 567 568 /* Second: actually write the @print_fmt */ 569 __set_synth_event_print_fmt(event, print_fmt, len + 1); 570 call->print_fmt = print_fmt; 571 572 return 0; 573 } 574 575 static void free_synth_field(struct synth_field *field) 576 { 577 kfree(field->type); 578 kfree(field->name); 579 kfree(field); 580 } 581 582 static struct synth_field *parse_synth_field(int argc, const char **argv, 583 int *consumed) 584 { 585 struct synth_field *field; 586 const char *prefix = NULL, *field_type = argv[0], *field_name, *array; 587 int len, ret = -ENOMEM; 588 struct seq_buf s; 589 ssize_t size; 590 591 if (field_type[0] == ';') 592 field_type++; 593 594 if (!strcmp(field_type, "unsigned")) { 595 if (argc < 3) { 596 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type)); 597 return ERR_PTR(-EINVAL); 598 } 599 prefix = "unsigned "; 600 field_type = argv[1]; 601 field_name = argv[2]; 602 *consumed = 3; 603 } else { 604 field_name = argv[1]; 605 *consumed = 2; 606 } 607 608 field = kzalloc(sizeof(*field), GFP_KERNEL); 609 if (!field) 610 return ERR_PTR(-ENOMEM); 611 612 len = strlen(field_name); 613 array = strchr(field_name, '['); 614 if (array) 615 len -= strlen(array); 616 else if (field_name[len - 1] == ';') 617 len--; 618 619 field->name = kmemdup_nul(field_name, len, GFP_KERNEL); 620 if (!field->name) 621 goto free; 622 623 if (!is_good_name(field->name)) { 624 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name)); 625 ret = -EINVAL; 626 goto free; 627 } 628 629 if (field_type[0] == ';') 630 field_type++; 631 len = strlen(field_type) + 1; 632 633 if (array) 634 len += strlen(array); 635 636 if (prefix) 637 len += strlen(prefix); 638 639 field->type = kzalloc(len, GFP_KERNEL); 640 if (!field->type) 641 goto free; 642 643 seq_buf_init(&s, field->type, len); 644 if (prefix) 645 seq_buf_puts(&s, prefix); 646 seq_buf_puts(&s, field_type); 647 if (array) { 648 seq_buf_puts(&s, array); 649 if (s.buffer[s.len - 1] == ';') 650 s.len--; 651 } 652 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s))) 653 goto free; 654 655 s.buffer[s.len] = '\0'; 656 657 size = synth_field_size(field->type); 658 if (size < 0) { 659 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type)); 660 ret = -EINVAL; 661 goto free; 662 } else if (size == 0) { 663 if (synth_field_is_string(field->type)) { 664 char *type; 665 666 len = sizeof("__data_loc ") + strlen(field->type) + 1; 667 type = kzalloc(len, GFP_KERNEL); 668 if (!type) 669 goto free; 670 671 seq_buf_init(&s, type, len); 672 seq_buf_puts(&s, "__data_loc "); 673 seq_buf_puts(&s, field->type); 674 675 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s))) 676 goto free; 677 s.buffer[s.len] = '\0'; 678 679 kfree(field->type); 680 field->type = type; 681 682 
			field->is_dynamic = true;
			size = sizeof(u64);
		} else {
			synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
			ret = -EINVAL;
			goto free;
		}
	}
	field->size = size;

	if (synth_field_is_string(field->type))
		field->is_string = true;

	field->is_signed = synth_field_signed(field->type);
 out:
	return field;
 free:
	free_synth_field(field);
	field = ERR_PTR(ret);
	goto out;
}

static void free_synth_tracepoint(struct tracepoint *tp)
{
	if (!tp)
		return;

	kfree(tp->name);
	kfree(tp);
}

static struct tracepoint *alloc_synth_tracepoint(char *name)
{
	struct tracepoint *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	tp->name = kstrdup(name, GFP_KERNEL);
	if (!tp->name) {
		kfree(tp);
		return ERR_PTR(-ENOMEM);
	}

	return tp;
}

struct synth_event *find_synth_event(const char *name)
{
	struct dyn_event *pos;
	struct synth_event *event;

	for_each_dyn_event(pos) {
		if (!is_synth_event(pos))
			continue;
		event = to_synth_event(pos);
		if (strcmp(event->name, name) == 0)
			return event;
	}

	return NULL;
}

static struct trace_event_fields synth_event_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = synth_event_define_fields },
	{}
};

static int register_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret = 0;

	event->call.class = &event->class;
	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
	if (!event->class.system) {
		ret = -ENOMEM;
		goto out;
	}

	event->tp = alloc_synth_tracepoint(event->name);
	if (IS_ERR(event->tp)) {
		ret = PTR_ERR(event->tp);
		event->tp = NULL;
		goto out;
	}

	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &synth_event_funcs;
	call->class->fields_array = synth_event_fields_array;

	ret = register_trace_event(&call->event);
	if (!ret) {
		ret = -ENODEV;
		goto out;
	}
	call->flags = TRACE_EVENT_FL_TRACEPOINT;
	call->class->reg = trace_event_reg;
	call->class->probe = trace_event_raw_event_synth;
	call->data = event;
	call->tp = event->tp;

	ret = trace_add_event_call(call);
	if (ret) {
		pr_warn("Failed to register synthetic event: %s\n",
			trace_event_name(call));
		goto err;
	}

	ret = set_synth_event_print_fmt(call);
	if (ret < 0) {
		trace_remove_event_call(call);
		goto err;
	}
 out:
	return ret;
 err:
	unregister_trace_event(&call->event);
	goto out;
}

static int unregister_synth_event(struct synth_event *event)
{
	struct trace_event_call *call = &event->call;
	int ret;

	ret = trace_remove_event_call(call);

	return ret;
}

static void free_synth_event(struct synth_event *event)
{
	unsigned int i;

	if (!event)
		return;

	for (i = 0; i < event->n_fields; i++)
		free_synth_field(event->fields[i]);

	kfree(event->fields);
	kfree(event->dynamic_fields);
	kfree(event->name);
	kfree(event->class.system);
	free_synth_tracepoint(event->tp);
	free_synth_event_print_fmt(&event->call);
	kfree(event);
}

static struct synth_event *alloc_synth_event(const char *name, int n_fields,
					     struct synth_field **fields)
{
	unsigned int i, j, n_dynamic_fields = 0;
	struct synth_event *event;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->name = kstrdup(name, GFP_KERNEL);
	if (!event->name) {
		kfree(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
	if (!event->fields) {
		free_synth_event(event);
		event = ERR_PTR(-ENOMEM);
		goto out;
	}

	for (i = 0; i < n_fields; i++)
		if (fields[i]->is_dynamic)
			n_dynamic_fields++;

	if (n_dynamic_fields) {
		event->dynamic_fields = kcalloc(n_dynamic_fields,
						sizeof(*event->dynamic_fields),
						GFP_KERNEL);
		if (!event->dynamic_fields) {
			free_synth_event(event);
			event = ERR_PTR(-ENOMEM);
			goto out;
		}
	}

	dyn_event_init(&event->devent, &synth_event_ops);

	for (i = 0, j = 0; i < n_fields; i++) {
		event->fields[i] = fields[i];

		if (fields[i]->is_dynamic) {
			event->dynamic_fields[j] = fields[i];
			event->dynamic_fields[j]->field_pos = i;
			event->dynamic_fields[j++] = fields[i];
			event->n_dynamic_fields++;
		}
	}
	event->n_fields = n_fields;
 out:
	return event;
}

static int synth_event_check_arg_fn(void *data)
{
	struct dynevent_arg_pair *arg_pair = data;
	int size;

	size = synth_field_size((char *)arg_pair->lhs);
	if (size == 0) {
		if (strstr((char *)arg_pair->lhs, "["))
			return 0;
	}

	return size ? 0 : -EINVAL;
}

/**
 * synth_event_add_field - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type: The type of the new field to add
 * @name: The name of the new field to add
 *
 * Add a new field to a synthetic event cmd object. Field ordering is in
 * the same order the fields are added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
			  const char *name)
{
	struct dynevent_arg_pair arg_pair;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type || !name)
		return -EINVAL;

	dynevent_arg_pair_init(&arg_pair, 0, ';');

	arg_pair.lhs = type;
	arg_pair.rhs = name;

	ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
	if (ret)
		return ret;

	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field);
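
/*
 * Example (sketch only, not compiled here): adding fields one at a time
 * to a previously initialized command.  The event name "wakeup_lat" and
 * the "cmd"/"buf" variables are illustrative; error handling is
 * abbreviated.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	synth_event_gen_cmd_start(&cmd, "wakeup_lat", THIS_MODULE);
 *	synth_event_add_field(&cmd, "pid_t", "pid");
 *	synth_event_add_field(&cmd, "u64", "delay");
 *	synth_event_add_field_str(&cmd, "char[] comm");
 *	synth_event_gen_cmd_end(&cmd);
 */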

/**
 * synth_event_add_field_str - Add a new field to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @type_name: The type and name of the new field to add, as a single string
 *
 * Add a new field to a synthetic event cmd object, as a single
 * string. The @type_name string is expected to be of the form 'type
 * name'; a ';' will be appended to it automatically. No sanity
 * checking is done - what's passed in is assumed to already be
 * well-formed. Field ordering is in the same order the fields are
 * added.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
{
	struct dynevent_arg arg;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (!type_name)
		return -EINVAL;

	dynevent_arg_init(&arg, ';');

	arg.str = type_name;

	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	if (++cmd->n_fields > SYNTH_FIELDS_MAX)
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_field_str);

/**
 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Add a new set of fields to a synthetic event cmd object. The event
 * fields that will be defined for the event should be passed in as an
 * array of struct synth_field_desc, and the number of elements in the
 * array passed in as n_fields. Field ordering will retain the
 * ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
			   struct synth_field_desc *fields,
			   unsigned int n_fields)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_fields);
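
/*
 * Example (sketch, not compiled here): describing all fields up front
 * with an array of struct synth_field_desc.  Field and event names are
 * illustrative and error handling is omitted.
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "delay" },
 *		{ .type = "char[]",	.name = "comm" },
 *	};
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	synth_event_gen_cmd_start(&cmd, "wakeup_lat", THIS_MODULE);
 *	synth_event_add_fields(&cmd, wakeup_fields, ARRAY_SIZE(wakeup_fields));
 *	synth_event_gen_cmd_end(&cmd);
 */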

/**
 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the synth_event_gen_cmd_start() wrapper, which
 * automatically adds a NULL to the end of the arg list. If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end(). This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * There should be an even number of variable args, each pair
 * consisting of a type followed by a field name.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
				struct module *mod, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, mod);
	for (;;) {
		const char *type, *name;

		type = va_arg(args, const char *);
		if (!type)
			break;
		name = va_arg(args, const char *);
		if (!name)
			break;

		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, type, name);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
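
/*
 * Example (sketch, not compiled here): generating the complete command
 * in one call via the synth_event_gen_cmd_start() wrapper (which
 * NULL-terminates the argument list), then executing it.  Names are
 * illustrative and error handling is omitted.
 *
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	synth_event_gen_cmd_start(&cmd, "block_lat", THIS_MODULE,
 *				  "u64", "ts_ns",
 *				  "u64", "delta_ns",
 *				  "pid_t", "pid");
 *	synth_event_gen_cmd_end(&cmd);
 */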

/**
 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end(). This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * The event fields that will be defined for the event should be
 * passed in as an array of struct synth_field_desc, and the number of
 * elements in the array passed in as n_fields. Field ordering will
 * retain the ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
				    struct module *mod,
				    struct synth_field_desc *fields,
				    unsigned int n_fields)
{
	struct dynevent_arg arg;
	unsigned int i;
	int ret = 0;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (n_fields > SYNTH_FIELDS_MAX)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL)
			return -EINVAL;

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);

static int save_cmdstr(int argc, const char *name, const char **argv)
{
	struct seq_buf s;
	char *buf;
	int i;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	seq_buf_init(&s, buf, MAX_DYNEVENT_CMD_LEN);

	seq_buf_puts(&s, name);

	for (i = 0; i < argc; i++) {
		seq_buf_putc(&s, ' ');
		seq_buf_puts(&s, argv[i]);
	}

	if (!seq_buf_buffer_left(&s)) {
		synth_err(SYNTH_ERR_CMD_TOO_LONG, 0);
		kfree(buf);
		return -EINVAL;
	}
	buf[s.len] = 0;
	last_cmd_set(buf);

	kfree(buf);
	return 0;
}

static int __create_synth_event(int argc, const char *name, const char **argv)
{
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	struct synth_event *event = NULL;
	int i, consumed = 0, n_fields = 0, ret = 0;

	ret = save_cmdstr(argc, name, argv);
	if (ret)
		return ret;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0' || argc < 1) {
		synth_err(SYNTH_ERR_CMD_INCOMPLETE, 0);
		return -EINVAL;
	}

	mutex_lock(&event_mutex);

	if (!is_good_name(name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
		ret = -EINVAL;
		goto out;
	}

	event = find_synth_event(name);
	if (event) {
		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
		ret = -EEXIST;
		goto out;
	}

	for (i = 0; i < argc - 1; i++) {
		if (strcmp(argv[i], ";") == 0)
			continue;
		if (n_fields == SYNTH_FIELDS_MAX) {
			synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
			ret = -EINVAL;
			goto err;
		}

		field = parse_synth_field(argc - i, &argv[i], &consumed);
		if (IS_ERR(field)) {
			ret = PTR_ERR(field);
			goto err;
		}
		fields[n_fields++] = field;
		i += consumed - 1;
	}

	if (i < argc && strcmp(argv[i], ";") != 0) {
		synth_err(SYNTH_ERR_INVALID_FIELD, errpos(argv[i]));
		ret = -EINVAL;
		goto err;
	}

	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent);
	else
		free_synth_event(event);
 out:
	mutex_unlock(&event_mutex);

	return ret;
 err:
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}

/**
 * synth_event_create - Create a new synthetic event
 * @name: The name of the new synthetic event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 * @mod: The module creating the event, NULL if not created from a module
 *
 * Create a new synthetic event with the given name under the
 * trace/events/synthetic/ directory. The event fields that will be
 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in
 * as n_fields. Field ordering will retain the ordering given in the
 * fields array.
 *
 * If the new synthetic event is being created from a module, the mod
 * param must be non-NULL. This will ensure that the trace buffer
 * won't contain unreadable events.
 *
 * The new synth event should be deleted using the synth_event_delete()
 * function. The new synthetic event can be generated from modules or
 * other kernel code using synth_event_trace() and related functions.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_create(const char *name, struct synth_field_desc *fields,
		       unsigned int n_fields, struct module *mod)
{
	struct dynevent_cmd cmd;
	char *buf;
	int ret;

	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

	ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
					      fields, n_fields);
	if (ret)
		goto out;

	ret = synth_event_gen_cmd_end(&cmd);
 out:
	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_create);

static int destroy_synth_event(struct synth_event *se)
{
	int ret;

	if (se->ref)
		ret = -EBUSY;
	else {
		ret = unregister_synth_event(se);
		if (!ret) {
			dyn_event_remove(&se->devent);
			free_synth_event(se);
		}
	}

	return ret;
}

/**
 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
 *
 * Delete a synthetic event that was created with synth_event_create().
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	if (mod) {
		mutex_lock(&trace_types_lock);
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
		mutex_unlock(&trace_types_lock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);
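
/*
 * Example (sketch, not compiled here): the all-in-one interface,
 * equivalent to the command built piecewise further above.  Names are
 * illustrative; a real caller would check the return values.
 *
 *	static struct synth_field_desc wakeup_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "delay" },
 *		{ .type = "char[]",	.name = "comm" },
 *	};
 *
 *	synth_event_create("wakeup_lat", wakeup_fields,
 *			   ARRAY_SIZE(wakeup_fields), THIS_MODULE);
 *	...
 *	synth_event_delete("wakeup_lat");
 */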

static int create_or_delete_synth_event(int argc, char **argv)
{
	const char *name = argv[0];
	int ret;

	/* trace_run_command() ensures argc != 0 */
	if (name[0] == '!') {
		ret = synth_event_delete(name + 1);
		return ret;
	}

	ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int synth_event_run_command(struct dynevent_cmd *cmd)
{
	struct synth_event *se;
	int ret;

	ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
	if (ret)
		return ret;

	se = find_synth_event(cmd->event_name);
	if (WARN_ON(!se))
		return -ENOENT;

	se->mod = cmd->private_data;

	return ret;
}

/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);

static inline int
__synth_event_trace_init(struct trace_event_file *file,
			 struct synth_event_trace_state *trace_state)
{
	int ret = 0;

	memset(trace_state, '\0', sizeof(*trace_state));

	/*
	 * Normal event tracing doesn't get called at all unless the
	 * ENABLED bit is set (which attaches the probe thus allowing
	 * this code to be called, etc). Because this is called
	 * directly by the user, we don't have that but we still need
	 * to honor not logging when disabled. For the iterated
	 * trace case, we save the enabled state upon start and just
	 * ignore the following data calls.
	 */
	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file)) {
		trace_state->disabled = true;
		ret = -ENOENT;
		goto out;
	}

	trace_state->event = file->event_call->data;
 out:
	return ret;
}

static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state,
			  int dynamic_fields_size)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	fields_size = trace_state->event->n_u64 * sizeof(u64);
	fields_size += dynamic_fields_size;

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}

	return ret;
}

static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}

/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @args: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list of 'n_vals' u64 values. The
 * number of vals must match the number of fields in the synthetic
 * event, and must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	unsigned int i, n_u64, len, data_size = 0;
	struct synth_event_trace_state state;
	va_list args;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		va_start(args, n_vals);

		for (i = 0; i < state.event->n_fields; i++) {
			u64 val = va_arg(args, u64);

			if (state.event->fields[i]->is_string &&
			    state.event->fields[i]->is_dynamic) {
				char *str_val = (char *)(long)val;

				data_size += strlen(str_val) + 1;
			}
		}

		va_end(args);
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)val;

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];

			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
 out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);
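
/*
 * Example (sketch, not compiled here): tracing the hypothetical
 * "wakeup_lat" event created above.  trace_get_event_file() is used to
 * look up the event's file; every value is passed cast to u64, with
 * string values passed as pointers cast to u64.
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "wakeup_lat");
 *	if (!IS_ERR(file))
 *		synth_event_trace(file, 3,
 *				  (u64)pid,
 *				  (u64)delay_ns,
 *				  (u64)(long)"cyclictest");
 */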

/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64. The number of
 * vals must match the number of fields in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	unsigned int i, n_u64, field_pos, len, data_size = 0;
	struct synth_event_trace_state state;
	char *str_val;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		for (i = 0; i < state.event->n_dynamic_fields; i++) {
			field_pos = state.event->dynamic_fields[i]->field_pos;
			str_val = (char *)(long)vals[field_pos];
			len = strlen(str_val) + 1;
			data_size += len;
		}
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)vals[i];

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];
			u64 val = vals[i];

			switch (field->size) {
			case 1:
				*(u8 *)&state.entry->fields[n_u64] = (u8)val;
				break;

			case 2:
				*(u16 *)&state.entry->fields[n_u64] = (u16)val;
				break;

			case 4:
				*(u32 *)&state.entry->fields[n_u64] = (u32)val;
				break;

			default:
				state.entry->fields[n_u64] = val;
				break;
			}
			n_u64++;
		}
	}
 out:
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_array);
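
/*
 * Example (sketch, not compiled here): the same trace as above, but
 * with the values packed into an array instead of a variable argument
 * list.
 *
 *	u64 vals[3];
 *
 *	vals[0] = (u64)pid;
 *	vals[1] = (u64)delay_ns;
 *	vals[2] = (u64)(long)"cyclictest";
 *
 *	synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */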

/**
 * synth_event_trace_start - Start piecewise synthetic event trace
 * @file: The trace_event_file representing the synthetic event
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Start the trace of a synthetic event field-by-field rather than all
 * at once.
 *
 * This function 'opens' an event trace, which means space is reserved
 * for the event in the trace buffer, after which the event's
 * individual field values can be set through either
 * synth_event_add_next_val() or synth_event_add_val().
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state until the event trace is
 * closed (and the event finally traced) using
 * synth_event_trace_end().
 *
 * Note that synth_event_trace_end() must be called after all values
 * have been added for each event trace, regardless of whether adding
 * all field values succeeded or not.
 *
 * Note also that for a given event trace, all fields must be added
 * using either synth_event_add_next_val() or synth_event_add_val()
 * but not both together or interleaved.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_start(struct trace_event_file *file,
			    struct synth_event_trace_state *trace_state)
{
	int ret;

	if (!trace_state)
		return -EINVAL;

	ret = __synth_event_trace_init(file, trace_state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (trace_state->event->n_dynamic_fields)
		return -ENOTSUPP;

	ret = __synth_event_trace_start(file, trace_state, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_start);

static int __synth_event_add_val(const char *field_name, u64 val,
				 struct synth_event_trace_state *trace_state)
{
	struct synth_field *field = NULL;
	struct synth_trace_event *entry;
	struct synth_event *event;
	int i, ret = 0;

	if (!trace_state) {
		ret = -EINVAL;
		goto out;
	}

	/* can't mix add_next_synth_val() with add_synth_val() */
	if (field_name) {
		if (trace_state->add_next) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_name = true;
	} else {
		if (trace_state->add_name) {
			ret = -EINVAL;
			goto out;
		}
		trace_state->add_next = true;
	}

	if (trace_state->disabled)
		goto out;

	event = trace_state->event;
	if (trace_state->add_name) {
		for (i = 0; i < event->n_fields; i++) {
			field = event->fields[i];
			if (strcmp(field->name, field_name) == 0)
				break;
		}
		if (!field) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		if (trace_state->cur_field >= event->n_fields) {
			ret = -EINVAL;
			goto out;
		}
		field = event->fields[trace_state->cur_field++];
	}

	entry = trace_state->entry;
	if (field->is_string) {
		char *str_val = (char *)(long)val;
		char *str_field;

		if (field->is_dynamic) { /* add_val can't do dynamic strings */
			ret = -EINVAL;
			goto out;
		}

		if (!str_val) {
			ret = -EINVAL;
			goto out;
		}

		str_field = (char *)&entry->fields[field->offset];
		strscpy(str_field, str_val, STR_VAR_LEN_MAX);
	} else {
		switch (field->size) {
		case 1:
			*(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
			break;

		case 2:
			*(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
			break;

		case 4:
			*(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
			break;

		default:
			trace_state->entry->fields[field->offset] = val;
			break;
		}
	}
 out:
	return ret;
}

/**
 * synth_event_add_next_val - Add the next field's value to an open synth trace
 * @val: The value to set the next field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the next field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function assumes all the fields in an event are to be set one
 * after another - successive calls to this function are made, one for
 * each field, in the order of the fields in the event, until all
 * fields have been set. If you'd rather set each field individually
 * without regard to ordering, synth_event_add_val() can be used
 * instead.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);
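
/*
 * Example (sketch, not compiled here): tracing an event piecewise, for
 * an event that has no dynamic (char[]) fields (those are rejected by
 * synth_event_trace_start() with -ENOTSUPP).  Only one of the two add
 * variants may be used for a given trace; the by-name form is shown
 * commented out.
 *
 *	struct synth_event_trace_state state;
 *
 *	synth_event_trace_start(file, &state);
 *
 *	synth_event_add_next_val((u64)pid, &state);
 *	synth_event_add_next_val((u64)delay_ns, &state);
 *
 *	// or, by name (never mixed with synth_event_add_next_val()):
 *	// synth_event_add_val("pid", (u64)pid, &state);
 *	// synth_event_add_val("delay", (u64)delay_ns, &state);
 *
 *	synth_event_trace_end(&state);
 */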

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

static int create_synth_event(int argc, const char **argv)
{
	const char *name = argv[0];
	int len;

	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0)
			return -EINVAL;
		name += len;
	}
	return __create_synth_event(argc - 1, name, argv + 1);
}

static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open		= synth_events_open,
	.write		= synth_events_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Register dynevent at core_initcall. This allows kernel to setup kprobe
 * events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;

	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", 0644, NULL,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);