1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * trace_events_synth - synthetic trace events 4 * 5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com> 6 */ 7 8 #include <linux/module.h> 9 #include <linux/kallsyms.h> 10 #include <linux/security.h> 11 #include <linux/mutex.h> 12 #include <linux/slab.h> 13 #include <linux/stacktrace.h> 14 #include <linux/rculist.h> 15 #include <linux/tracefs.h> 16 17 /* for gfp flag names */ 18 #include <linux/trace_events.h> 19 #include <trace/events/mmflags.h> 20 #include "trace_probe.h" 21 #include "trace_probe_kernel.h" 22 23 #include "trace_synth.h" 24 25 #undef ERRORS 26 #define ERRORS \ 27 C(BAD_NAME, "Illegal name"), \ 28 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\ 29 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\ 30 C(EVENT_EXISTS, "Event already exists"), \ 31 C(TOO_MANY_FIELDS, "Too many fields"), \ 32 C(INCOMPLETE_TYPE, "Incomplete type"), \ 33 C(INVALID_TYPE, "Invalid type"), \ 34 C(INVALID_FIELD, "Invalid field"), \ 35 C(INVALID_ARRAY_SPEC, "Invalid array specification"), 36 37 #undef C 38 #define C(a, b) SYNTH_ERR_##a 39 40 enum { ERRORS }; 41 42 #undef C 43 #define C(a, b) b 44 45 static const char *err_text[] = { ERRORS }; 46 47 static DEFINE_MUTEX(lastcmd_mutex); 48 static char *last_cmd; 49 50 static int errpos(const char *str) 51 { 52 int ret = 0; 53 54 mutex_lock(&lastcmd_mutex); 55 if (!str || !last_cmd) 56 goto out; 57 58 ret = err_pos(last_cmd, str); 59 out: 60 mutex_unlock(&lastcmd_mutex); 61 return ret; 62 } 63 64 static void last_cmd_set(const char *str) 65 { 66 if (!str) 67 return; 68 69 mutex_lock(&lastcmd_mutex); 70 kfree(last_cmd); 71 last_cmd = kstrdup(str, GFP_KERNEL); 72 mutex_unlock(&lastcmd_mutex); 73 } 74 75 static void synth_err(u8 err_type, u16 err_pos) 76 { 77 mutex_lock(&lastcmd_mutex); 78 if (!last_cmd) 79 goto out; 80 81 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text, 82 err_type, 
err_pos); 83 out: 84 mutex_unlock(&lastcmd_mutex); 85 } 86 87 static int create_synth_event(const char *raw_command); 88 static int synth_event_show(struct seq_file *m, struct dyn_event *ev); 89 static int synth_event_release(struct dyn_event *ev); 90 static bool synth_event_is_busy(struct dyn_event *ev); 91 static bool synth_event_match(const char *system, const char *event, 92 int argc, const char **argv, struct dyn_event *ev); 93 94 static struct dyn_event_operations synth_event_ops = { 95 .create = create_synth_event, 96 .show = synth_event_show, 97 .is_busy = synth_event_is_busy, 98 .free = synth_event_release, 99 .match = synth_event_match, 100 }; 101 102 static bool is_synth_event(struct dyn_event *ev) 103 { 104 return ev->ops == &synth_event_ops; 105 } 106 107 static struct synth_event *to_synth_event(struct dyn_event *ev) 108 { 109 return container_of(ev, struct synth_event, devent); 110 } 111 112 static bool synth_event_is_busy(struct dyn_event *ev) 113 { 114 struct synth_event *event = to_synth_event(ev); 115 116 return event->ref != 0; 117 } 118 119 static bool synth_event_match(const char *system, const char *event, 120 int argc, const char **argv, struct dyn_event *ev) 121 { 122 struct synth_event *sev = to_synth_event(ev); 123 124 return strcmp(sev->name, event) == 0 && 125 (!system || strcmp(system, SYNTH_SYSTEM) == 0); 126 } 127 128 struct synth_trace_event { 129 struct trace_entry ent; 130 union trace_synth_field fields[]; 131 }; 132 133 static int synth_event_define_fields(struct trace_event_call *call) 134 { 135 struct synth_trace_event trace; 136 int offset = offsetof(typeof(trace), fields); 137 struct synth_event *event = call->data; 138 unsigned int i, size, n_u64; 139 char *name, *type; 140 bool is_signed; 141 int ret = 0; 142 143 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 144 size = event->fields[i]->size; 145 is_signed = event->fields[i]->is_signed; 146 type = event->fields[i]->type; 147 name = event->fields[i]->name; 148 ret = 
trace_define_field(call, type, name, offset, size, 149 is_signed, FILTER_OTHER); 150 if (ret) 151 break; 152 153 event->fields[i]->offset = n_u64; 154 155 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) { 156 offset += STR_VAR_LEN_MAX; 157 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 158 } else { 159 offset += sizeof(u64); 160 n_u64++; 161 } 162 } 163 164 event->n_u64 = n_u64; 165 166 return ret; 167 } 168 169 static bool synth_field_signed(char *type) 170 { 171 if (str_has_prefix(type, "u")) 172 return false; 173 if (strcmp(type, "gfp_t") == 0) 174 return false; 175 176 return true; 177 } 178 179 static int synth_field_is_string(char *type) 180 { 181 if (strstr(type, "char[") != NULL) 182 return true; 183 184 return false; 185 } 186 187 static int synth_field_is_stack(char *type) 188 { 189 if (strstr(type, "long[") != NULL) 190 return true; 191 192 return false; 193 } 194 195 static int synth_field_string_size(char *type) 196 { 197 char buf[4], *end, *start; 198 unsigned int len; 199 int size, err; 200 201 start = strstr(type, "char["); 202 if (start == NULL) 203 return -EINVAL; 204 start += sizeof("char[") - 1; 205 206 end = strchr(type, ']'); 207 if (!end || end < start || type + strlen(type) > end + 1) 208 return -EINVAL; 209 210 len = end - start; 211 if (len > 3) 212 return -EINVAL; 213 214 if (len == 0) 215 return 0; /* variable-length string */ 216 217 strncpy(buf, start, len); 218 buf[len] = '\0'; 219 220 err = kstrtouint(buf, 0, &size); 221 if (err) 222 return err; 223 224 if (size > STR_VAR_LEN_MAX) 225 return -EINVAL; 226 227 return size; 228 } 229 230 static int synth_field_size(char *type) 231 { 232 int size = 0; 233 234 if (strcmp(type, "s64") == 0) 235 size = sizeof(s64); 236 else if (strcmp(type, "u64") == 0) 237 size = sizeof(u64); 238 else if (strcmp(type, "s32") == 0) 239 size = sizeof(s32); 240 else if (strcmp(type, "u32") == 0) 241 size = sizeof(u32); 242 else if (strcmp(type, "s16") == 0) 243 size = sizeof(s16); 244 else if 
(strcmp(type, "u16") == 0) 245 size = sizeof(u16); 246 else if (strcmp(type, "s8") == 0) 247 size = sizeof(s8); 248 else if (strcmp(type, "u8") == 0) 249 size = sizeof(u8); 250 else if (strcmp(type, "char") == 0) 251 size = sizeof(char); 252 else if (strcmp(type, "unsigned char") == 0) 253 size = sizeof(unsigned char); 254 else if (strcmp(type, "int") == 0) 255 size = sizeof(int); 256 else if (strcmp(type, "unsigned int") == 0) 257 size = sizeof(unsigned int); 258 else if (strcmp(type, "long") == 0) 259 size = sizeof(long); 260 else if (strcmp(type, "unsigned long") == 0) 261 size = sizeof(unsigned long); 262 else if (strcmp(type, "bool") == 0) 263 size = sizeof(bool); 264 else if (strcmp(type, "pid_t") == 0) 265 size = sizeof(pid_t); 266 else if (strcmp(type, "gfp_t") == 0) 267 size = sizeof(gfp_t); 268 else if (synth_field_is_string(type)) 269 size = synth_field_string_size(type); 270 else if (synth_field_is_stack(type)) 271 size = 0; 272 273 return size; 274 } 275 276 static const char *synth_field_fmt(char *type) 277 { 278 const char *fmt = "%llu"; 279 280 if (strcmp(type, "s64") == 0) 281 fmt = "%lld"; 282 else if (strcmp(type, "u64") == 0) 283 fmt = "%llu"; 284 else if (strcmp(type, "s32") == 0) 285 fmt = "%d"; 286 else if (strcmp(type, "u32") == 0) 287 fmt = "%u"; 288 else if (strcmp(type, "s16") == 0) 289 fmt = "%d"; 290 else if (strcmp(type, "u16") == 0) 291 fmt = "%u"; 292 else if (strcmp(type, "s8") == 0) 293 fmt = "%d"; 294 else if (strcmp(type, "u8") == 0) 295 fmt = "%u"; 296 else if (strcmp(type, "char") == 0) 297 fmt = "%d"; 298 else if (strcmp(type, "unsigned char") == 0) 299 fmt = "%u"; 300 else if (strcmp(type, "int") == 0) 301 fmt = "%d"; 302 else if (strcmp(type, "unsigned int") == 0) 303 fmt = "%u"; 304 else if (strcmp(type, "long") == 0) 305 fmt = "%ld"; 306 else if (strcmp(type, "unsigned long") == 0) 307 fmt = "%lu"; 308 else if (strcmp(type, "bool") == 0) 309 fmt = "%d"; 310 else if (strcmp(type, "pid_t") == 0) 311 fmt = "%d"; 312 else if 
(strcmp(type, "gfp_t") == 0) 313 fmt = "%x"; 314 else if (synth_field_is_string(type)) 315 fmt = "%s"; 316 else if (synth_field_is_stack(type)) 317 fmt = "%s"; 318 319 return fmt; 320 } 321 322 static void print_synth_event_num_val(struct trace_seq *s, 323 char *print_fmt, char *name, 324 int size, union trace_synth_field *val, char *space) 325 { 326 switch (size) { 327 case 1: 328 trace_seq_printf(s, print_fmt, name, val->as_u8, space); 329 break; 330 331 case 2: 332 trace_seq_printf(s, print_fmt, name, val->as_u16, space); 333 break; 334 335 case 4: 336 trace_seq_printf(s, print_fmt, name, val->as_u32, space); 337 break; 338 339 default: 340 trace_seq_printf(s, print_fmt, name, val->as_u64, space); 341 break; 342 } 343 } 344 345 static enum print_line_t print_synth_event(struct trace_iterator *iter, 346 int flags, 347 struct trace_event *event) 348 { 349 struct trace_array *tr = iter->tr; 350 struct trace_seq *s = &iter->seq; 351 struct synth_trace_event *entry; 352 struct synth_event *se; 353 unsigned int i, j, n_u64; 354 char print_fmt[32]; 355 const char *fmt; 356 357 entry = (struct synth_trace_event *)iter->ent; 358 se = container_of(event, struct synth_event, call.event); 359 360 trace_seq_printf(s, "%s: ", se->name); 361 362 for (i = 0, n_u64 = 0; i < se->n_fields; i++) { 363 if (trace_seq_has_overflowed(s)) 364 goto end; 365 366 fmt = synth_field_fmt(se->fields[i]->type); 367 368 /* parameter types */ 369 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE) 370 trace_seq_printf(s, "%s ", fmt); 371 372 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt); 373 374 /* parameter values */ 375 if (se->fields[i]->is_string) { 376 if (se->fields[i]->is_dynamic) { 377 union trace_synth_field *data = &entry->fields[n_u64]; 378 379 trace_seq_printf(s, print_fmt, se->fields[i]->name, 380 STR_VAR_LEN_MAX, 381 (char *)entry + data->as_dynamic.offset, 382 i == se->n_fields - 1 ? 
"" : " "); 383 n_u64++; 384 } else { 385 trace_seq_printf(s, print_fmt, se->fields[i]->name, 386 STR_VAR_LEN_MAX, 387 (char *)&entry->fields[n_u64].as_u64, 388 i == se->n_fields - 1 ? "" : " "); 389 n_u64 += STR_VAR_LEN_MAX / sizeof(u64); 390 } 391 } else if (se->fields[i]->is_stack) { 392 union trace_synth_field *data = &entry->fields[n_u64]; 393 unsigned long *p = (void *)entry + data->as_dynamic.offset; 394 395 trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name); 396 for (j = 1; j < data->as_dynamic.len / sizeof(long); j++) 397 trace_seq_printf(s, "=> %pS\n", (void *)p[j]); 398 n_u64++; 399 } else { 400 struct trace_print_flags __flags[] = { 401 __def_gfpflag_names, {-1, NULL} }; 402 char *space = (i == se->n_fields - 1 ? "" : " "); 403 404 print_synth_event_num_val(s, print_fmt, 405 se->fields[i]->name, 406 se->fields[i]->size, 407 &entry->fields[n_u64], 408 space); 409 410 if (strcmp(se->fields[i]->type, "gfp_t") == 0) { 411 trace_seq_puts(s, " ("); 412 trace_print_flags_seq(s, "|", 413 entry->fields[n_u64].as_u64, 414 __flags); 415 trace_seq_putc(s, ')'); 416 } 417 n_u64++; 418 } 419 } 420 end: 421 trace_seq_putc(s, '\n'); 422 423 return trace_handle_return(s); 424 } 425 426 static struct trace_event_functions synth_event_funcs = { 427 .trace = print_synth_event 428 }; 429 430 static unsigned int trace_string(struct synth_trace_event *entry, 431 struct synth_event *event, 432 char *str_val, 433 bool is_dynamic, 434 unsigned int data_size, 435 unsigned int *n_u64) 436 { 437 unsigned int len = 0; 438 char *str_field; 439 int ret; 440 441 if (is_dynamic) { 442 union trace_synth_field *data = &entry->fields[*n_u64]; 443 444 len = fetch_store_strlen((unsigned long)str_val); 445 data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size; 446 data->as_dynamic.len = len; 447 448 ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry); 449 450 (*n_u64)++; 451 } else { 452 str_field = (char 
*)&entry->fields[*n_u64].as_u64; 453 454 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 455 if ((unsigned long)str_val < TASK_SIZE) 456 ret = strncpy_from_user_nofault(str_field, (const void __user *)str_val, STR_VAR_LEN_MAX); 457 else 458 #endif 459 ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX); 460 461 if (ret < 0) 462 strcpy(str_field, FAULT_STRING); 463 464 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64); 465 } 466 467 return len; 468 } 469 470 static unsigned int trace_stack(struct synth_trace_event *entry, 471 struct synth_event *event, 472 long *stack, 473 unsigned int data_size, 474 unsigned int *n_u64) 475 { 476 union trace_synth_field *data = &entry->fields[*n_u64]; 477 unsigned int len; 478 u32 data_offset; 479 void *data_loc; 480 481 data_offset = struct_size(entry, fields, event->n_u64); 482 data_offset += data_size; 483 484 for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) { 485 if (!stack[len]) 486 break; 487 } 488 489 len *= sizeof(long); 490 491 /* Find the dynamic section to copy the stack into. 
*/ 492 data_loc = (void *)entry + data_offset; 493 memcpy(data_loc, stack, len); 494 495 /* Fill in the field that holds the offset/len combo */ 496 497 data->as_dynamic.offset = data_offset; 498 data->as_dynamic.len = len; 499 500 (*n_u64)++; 501 502 return len; 503 } 504 505 static notrace void trace_event_raw_event_synth(void *__data, 506 u64 *var_ref_vals, 507 unsigned int *var_ref_idx) 508 { 509 unsigned int i, n_u64, val_idx, len, data_size = 0; 510 struct trace_event_file *trace_file = __data; 511 struct synth_trace_event *entry; 512 struct trace_event_buffer fbuffer; 513 struct trace_buffer *buffer; 514 struct synth_event *event; 515 int fields_size = 0; 516 517 event = trace_file->event_call->data; 518 519 if (trace_trigger_soft_disabled(trace_file)) 520 return; 521 522 fields_size = event->n_u64 * sizeof(u64); 523 524 for (i = 0; i < event->n_dynamic_fields; i++) { 525 unsigned int field_pos = event->dynamic_fields[i]->field_pos; 526 char *str_val; 527 528 val_idx = var_ref_idx[field_pos]; 529 str_val = (char *)(long)var_ref_vals[val_idx]; 530 531 if (event->dynamic_fields[i]->is_stack) { 532 /* reserve one extra element for size */ 533 len = *((unsigned long *)str_val) + 1; 534 len *= sizeof(unsigned long); 535 } else { 536 len = fetch_store_strlen((unsigned long)str_val); 537 } 538 539 fields_size += len; 540 } 541 542 /* 543 * Avoid ring buffer recursion detection, as this event 544 * is being performed within another event. 
545 */ 546 buffer = trace_file->tr->array_buffer.buffer; 547 ring_buffer_nest_start(buffer); 548 549 entry = trace_event_buffer_reserve(&fbuffer, trace_file, 550 sizeof(*entry) + fields_size); 551 if (!entry) 552 goto out; 553 554 for (i = 0, n_u64 = 0; i < event->n_fields; i++) { 555 val_idx = var_ref_idx[i]; 556 if (event->fields[i]->is_string) { 557 char *str_val = (char *)(long)var_ref_vals[val_idx]; 558 559 len = trace_string(entry, event, str_val, 560 event->fields[i]->is_dynamic, 561 data_size, &n_u64); 562 data_size += len; /* only dynamic string increments */ 563 } else if (event->fields[i]->is_stack) { 564 long *stack = (long *)(long)var_ref_vals[val_idx]; 565 566 len = trace_stack(entry, event, stack, 567 data_size, &n_u64); 568 data_size += len; 569 } else { 570 struct synth_field *field = event->fields[i]; 571 u64 val = var_ref_vals[val_idx]; 572 573 switch (field->size) { 574 case 1: 575 entry->fields[n_u64].as_u8 = (u8)val; 576 break; 577 578 case 2: 579 entry->fields[n_u64].as_u16 = (u16)val; 580 break; 581 582 case 4: 583 entry->fields[n_u64].as_u32 = (u32)val; 584 break; 585 586 default: 587 entry->fields[n_u64].as_u64 = val; 588 break; 589 } 590 n_u64++; 591 } 592 } 593 594 trace_event_buffer_commit(&fbuffer); 595 out: 596 ring_buffer_nest_end(buffer); 597 } 598 599 static void free_synth_event_print_fmt(struct trace_event_call *call) 600 { 601 if (call) { 602 kfree(call->print_fmt); 603 call->print_fmt = NULL; 604 } 605 } 606 607 static int __set_synth_event_print_fmt(struct synth_event *event, 608 char *buf, int len) 609 { 610 const char *fmt; 611 int pos = 0; 612 int i; 613 614 /* When len=0, we just calculate the needed length */ 615 #define LEN_OR_ZERO (len ? len - pos : 0) 616 617 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 618 for (i = 0; i < event->n_fields; i++) { 619 fmt = synth_field_fmt(event->fields[i]->type); 620 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", 621 event->fields[i]->name, fmt, 622 i == event->n_fields - 1 ? 
"" : ", "); 623 } 624 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 625 626 for (i = 0; i < event->n_fields; i++) { 627 if (event->fields[i]->is_string && 628 event->fields[i]->is_dynamic) 629 pos += snprintf(buf + pos, LEN_OR_ZERO, 630 ", __get_str(%s)", event->fields[i]->name); 631 else if (event->fields[i]->is_stack) 632 pos += snprintf(buf + pos, LEN_OR_ZERO, 633 ", __get_stacktrace(%s)", event->fields[i]->name); 634 else 635 pos += snprintf(buf + pos, LEN_OR_ZERO, 636 ", REC->%s", event->fields[i]->name); 637 } 638 639 #undef LEN_OR_ZERO 640 641 /* return the length of print_fmt */ 642 return pos; 643 } 644 645 static int set_synth_event_print_fmt(struct trace_event_call *call) 646 { 647 struct synth_event *event = call->data; 648 char *print_fmt; 649 int len; 650 651 /* First: called with 0 length to calculate the needed length */ 652 len = __set_synth_event_print_fmt(event, NULL, 0); 653 654 print_fmt = kmalloc(len + 1, GFP_KERNEL); 655 if (!print_fmt) 656 return -ENOMEM; 657 658 /* Second: actually write the @print_fmt */ 659 __set_synth_event_print_fmt(event, print_fmt, len + 1); 660 call->print_fmt = print_fmt; 661 662 return 0; 663 } 664 665 static void free_synth_field(struct synth_field *field) 666 { 667 kfree(field->type); 668 kfree(field->name); 669 kfree(field); 670 } 671 672 static int check_field_version(const char *prefix, const char *field_type, 673 const char *field_name) 674 { 675 /* 676 * For backward compatibility, the old synthetic event command 677 * format did not require semicolons, and in order to not 678 * break user space, that old format must still work. If a new 679 * feature is added, then the format that uses the new feature 680 * will be required to have semicolons, as nothing that uses 681 * the old format would be using the new, yet to be created, 682 * feature. When a new feature is added, this will detect it, 683 * and return a number greater than 1, and require the format 684 * to use semicolons. 
685 */ 686 return 1; 687 } 688 689 static struct synth_field *parse_synth_field(int argc, char **argv, 690 int *consumed, int *field_version) 691 { 692 const char *prefix = NULL, *field_type = argv[0], *field_name, *array; 693 struct synth_field *field; 694 int len, ret = -ENOMEM; 695 struct seq_buf s; 696 ssize_t size; 697 698 if (!strcmp(field_type, "unsigned")) { 699 if (argc < 3) { 700 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type)); 701 return ERR_PTR(-EINVAL); 702 } 703 prefix = "unsigned "; 704 field_type = argv[1]; 705 field_name = argv[2]; 706 *consumed += 3; 707 } else { 708 field_name = argv[1]; 709 *consumed += 2; 710 } 711 712 if (!field_name) { 713 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type)); 714 return ERR_PTR(-EINVAL); 715 } 716 717 *field_version = check_field_version(prefix, field_type, field_name); 718 719 field = kzalloc(sizeof(*field), GFP_KERNEL); 720 if (!field) 721 return ERR_PTR(-ENOMEM); 722 723 len = strlen(field_name); 724 array = strchr(field_name, '['); 725 if (array) 726 len -= strlen(array); 727 728 field->name = kmemdup_nul(field_name, len, GFP_KERNEL); 729 if (!field->name) 730 goto free; 731 732 if (!is_good_name(field->name)) { 733 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name)); 734 ret = -EINVAL; 735 goto free; 736 } 737 738 len = strlen(field_type) + 1; 739 740 if (array) 741 len += strlen(array); 742 743 if (prefix) 744 len += strlen(prefix); 745 746 field->type = kzalloc(len, GFP_KERNEL); 747 if (!field->type) 748 goto free; 749 750 seq_buf_init(&s, field->type, len); 751 if (prefix) 752 seq_buf_puts(&s, prefix); 753 seq_buf_puts(&s, field_type); 754 if (array) 755 seq_buf_puts(&s, array); 756 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s))) 757 goto free; 758 759 s.buffer[s.len] = '\0'; 760 761 size = synth_field_size(field->type); 762 if (size < 0) { 763 if (array) 764 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name)); 765 else 766 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type)); 767 ret = 
-EINVAL; 768 goto free; 769 } else if (size == 0) { 770 if (synth_field_is_string(field->type) || 771 synth_field_is_stack(field->type)) { 772 char *type; 773 774 len = sizeof("__data_loc ") + strlen(field->type) + 1; 775 type = kzalloc(len, GFP_KERNEL); 776 if (!type) 777 goto free; 778 779 seq_buf_init(&s, type, len); 780 seq_buf_puts(&s, "__data_loc "); 781 seq_buf_puts(&s, field->type); 782 783 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s))) 784 goto free; 785 s.buffer[s.len] = '\0'; 786 787 kfree(field->type); 788 field->type = type; 789 790 field->is_dynamic = true; 791 size = sizeof(u64); 792 } else { 793 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type)); 794 ret = -EINVAL; 795 goto free; 796 } 797 } 798 field->size = size; 799 800 if (synth_field_is_string(field->type)) 801 field->is_string = true; 802 else if (synth_field_is_stack(field->type)) 803 field->is_stack = true; 804 805 field->is_signed = synth_field_signed(field->type); 806 out: 807 return field; 808 free: 809 free_synth_field(field); 810 field = ERR_PTR(ret); 811 goto out; 812 } 813 814 static void free_synth_tracepoint(struct tracepoint *tp) 815 { 816 if (!tp) 817 return; 818 819 kfree(tp->name); 820 kfree(tp); 821 } 822 823 static struct tracepoint *alloc_synth_tracepoint(char *name) 824 { 825 struct tracepoint *tp; 826 827 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 828 if (!tp) 829 return ERR_PTR(-ENOMEM); 830 831 tp->name = kstrdup(name, GFP_KERNEL); 832 if (!tp->name) { 833 kfree(tp); 834 return ERR_PTR(-ENOMEM); 835 } 836 837 return tp; 838 } 839 840 struct synth_event *find_synth_event(const char *name) 841 { 842 struct dyn_event *pos; 843 struct synth_event *event; 844 845 for_each_dyn_event(pos) { 846 if (!is_synth_event(pos)) 847 continue; 848 event = to_synth_event(pos); 849 if (strcmp(event->name, name) == 0) 850 return event; 851 } 852 853 return NULL; 854 } 855 856 static struct trace_event_fields synth_event_fields_array[] = { 857 { .type = TRACE_FUNCTION_TYPE, 858 .define_fields = 
synth_event_define_fields }, 859 {} 860 }; 861 862 static int synth_event_reg(struct trace_event_call *call, 863 enum trace_reg type, void *data) 864 { 865 struct synth_event *event = container_of(call, struct synth_event, call); 866 867 switch (type) { 868 #ifdef CONFIG_PERF_EVENTS 869 case TRACE_REG_PERF_REGISTER: 870 #endif 871 case TRACE_REG_REGISTER: 872 if (!try_module_get(event->mod)) 873 return -EBUSY; 874 break; 875 default: 876 break; 877 } 878 879 int ret = trace_event_reg(call, type, data); 880 881 switch (type) { 882 #ifdef CONFIG_PERF_EVENTS 883 case TRACE_REG_PERF_UNREGISTER: 884 #endif 885 case TRACE_REG_UNREGISTER: 886 module_put(event->mod); 887 break; 888 default: 889 break; 890 } 891 return ret; 892 } 893 894 static int register_synth_event(struct synth_event *event) 895 { 896 struct trace_event_call *call = &event->call; 897 int ret = 0; 898 899 event->call.class = &event->class; 900 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL); 901 if (!event->class.system) { 902 ret = -ENOMEM; 903 goto out; 904 } 905 906 event->tp = alloc_synth_tracepoint(event->name); 907 if (IS_ERR(event->tp)) { 908 ret = PTR_ERR(event->tp); 909 event->tp = NULL; 910 goto out; 911 } 912 913 INIT_LIST_HEAD(&call->class->fields); 914 call->event.funcs = &synth_event_funcs; 915 call->class->fields_array = synth_event_fields_array; 916 917 ret = register_trace_event(&call->event); 918 if (!ret) { 919 ret = -ENODEV; 920 goto out; 921 } 922 call->flags = TRACE_EVENT_FL_TRACEPOINT; 923 call->class->reg = synth_event_reg; 924 call->class->probe = trace_event_raw_event_synth; 925 call->data = event; 926 call->tp = event->tp; 927 928 ret = trace_add_event_call(call); 929 if (ret) { 930 pr_warn("Failed to register synthetic event: %s\n", 931 trace_event_name(call)); 932 goto err; 933 } 934 935 ret = set_synth_event_print_fmt(call); 936 /* unregister_trace_event() will be called inside */ 937 if (ret < 0) 938 trace_remove_event_call(call); 939 out: 940 return ret; 941 err: 
942 unregister_trace_event(&call->event); 943 goto out; 944 } 945 946 static int unregister_synth_event(struct synth_event *event) 947 { 948 struct trace_event_call *call = &event->call; 949 int ret; 950 951 ret = trace_remove_event_call(call); 952 953 return ret; 954 } 955 956 static void free_synth_event(struct synth_event *event) 957 { 958 unsigned int i; 959 960 if (!event) 961 return; 962 963 for (i = 0; i < event->n_fields; i++) 964 free_synth_field(event->fields[i]); 965 966 kfree(event->fields); 967 kfree(event->dynamic_fields); 968 kfree(event->name); 969 kfree(event->class.system); 970 free_synth_tracepoint(event->tp); 971 free_synth_event_print_fmt(&event->call); 972 kfree(event); 973 } 974 975 static struct synth_event *alloc_synth_event(const char *name, int n_fields, 976 struct synth_field **fields) 977 { 978 unsigned int i, j, n_dynamic_fields = 0; 979 struct synth_event *event; 980 981 event = kzalloc(sizeof(*event), GFP_KERNEL); 982 if (!event) { 983 event = ERR_PTR(-ENOMEM); 984 goto out; 985 } 986 987 event->name = kstrdup(name, GFP_KERNEL); 988 if (!event->name) { 989 kfree(event); 990 event = ERR_PTR(-ENOMEM); 991 goto out; 992 } 993 994 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL); 995 if (!event->fields) { 996 free_synth_event(event); 997 event = ERR_PTR(-ENOMEM); 998 goto out; 999 } 1000 1001 for (i = 0; i < n_fields; i++) 1002 if (fields[i]->is_dynamic) 1003 n_dynamic_fields++; 1004 1005 if (n_dynamic_fields) { 1006 event->dynamic_fields = kcalloc(n_dynamic_fields, 1007 sizeof(*event->dynamic_fields), 1008 GFP_KERNEL); 1009 if (!event->dynamic_fields) { 1010 free_synth_event(event); 1011 event = ERR_PTR(-ENOMEM); 1012 goto out; 1013 } 1014 } 1015 1016 dyn_event_init(&event->devent, &synth_event_ops); 1017 1018 for (i = 0, j = 0; i < n_fields; i++) { 1019 fields[i]->field_pos = i; 1020 event->fields[i] = fields[i]; 1021 1022 if (fields[i]->is_dynamic) 1023 event->dynamic_fields[j++] = fields[i]; 1024 } 1025 
event->n_dynamic_fields = j; 1026 event->n_fields = n_fields; 1027 out: 1028 return event; 1029 } 1030 1031 static int synth_event_check_arg_fn(void *data) 1032 { 1033 struct dynevent_arg_pair *arg_pair = data; 1034 int size; 1035 1036 size = synth_field_size((char *)arg_pair->lhs); 1037 if (size == 0) { 1038 if (strstr((char *)arg_pair->lhs, "[")) 1039 return 0; 1040 } 1041 1042 return size ? 0 : -EINVAL; 1043 } 1044 1045 /** 1046 * synth_event_add_field - Add a new field to a synthetic event cmd 1047 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1048 * @type: The type of the new field to add 1049 * @name: The name of the new field to add 1050 * 1051 * Add a new field to a synthetic event cmd object. Field ordering is in 1052 * the same order the fields are added. 1053 * 1054 * See synth_field_size() for available types. If field_name contains 1055 * [n] the field is considered to be an array. 1056 * 1057 * Return: 0 if successful, error otherwise. 1058 */ 1059 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type, 1060 const char *name) 1061 { 1062 struct dynevent_arg_pair arg_pair; 1063 int ret; 1064 1065 if (cmd->type != DYNEVENT_TYPE_SYNTH) 1066 return -EINVAL; 1067 1068 if (!type || !name) 1069 return -EINVAL; 1070 1071 dynevent_arg_pair_init(&arg_pair, 0, ';'); 1072 1073 arg_pair.lhs = type; 1074 arg_pair.rhs = name; 1075 1076 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn); 1077 if (ret) 1078 return ret; 1079 1080 if (++cmd->n_fields > SYNTH_FIELDS_MAX) 1081 ret = -EINVAL; 1082 1083 return ret; 1084 } 1085 EXPORT_SYMBOL_GPL(synth_event_add_field); 1086 1087 /** 1088 * synth_event_add_field_str - Add a new field to a synthetic event cmd 1089 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1090 * @type_name: The type and name of the new field to add, as a single string 1091 * 1092 * Add a new field to a synthetic event cmd object, as a single 1093 * string. 
The @type_name string is expected to be of the form 'type 1094 * name', which will be appended by ';'. No sanity checking is done - 1095 * what's passed in is assumed to already be well-formed. Field 1096 * ordering is in the same order the fields are added. 1097 * 1098 * See synth_field_size() for available types. If field_name contains 1099 * [n] the field is considered to be an array. 1100 * 1101 * Return: 0 if successful, error otherwise. 1102 */ 1103 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name) 1104 { 1105 struct dynevent_arg arg; 1106 int ret; 1107 1108 if (cmd->type != DYNEVENT_TYPE_SYNTH) 1109 return -EINVAL; 1110 1111 if (!type_name) 1112 return -EINVAL; 1113 1114 dynevent_arg_init(&arg, ';'); 1115 1116 arg.str = type_name; 1117 1118 ret = dynevent_arg_add(cmd, &arg, NULL); 1119 if (ret) 1120 return ret; 1121 1122 if (++cmd->n_fields > SYNTH_FIELDS_MAX) 1123 ret = -EINVAL; 1124 1125 return ret; 1126 } 1127 EXPORT_SYMBOL_GPL(synth_event_add_field_str); 1128 1129 /** 1130 * synth_event_add_fields - Add multiple fields to a synthetic event cmd 1131 * @cmd: A pointer to the dynevent_cmd struct representing the new event 1132 * @fields: An array of type/name field descriptions 1133 * @n_fields: The number of field descriptions contained in the fields array 1134 * 1135 * Add a new set of fields to a synthetic event cmd object. The event 1136 * fields that will be defined for the event should be passed in as an 1137 * array of struct synth_field_desc, and the number of elements in the 1138 * array passed in as n_fields. Field ordering will retain the 1139 * ordering given in the fields array. 1140 * 1141 * See synth_field_size() for available types. If field_name contains 1142 * [n] the field is considered to be an array. 1143 * 1144 * Return: 0 if successful, error otherwise. 
 */
int synth_event_add_fields(struct dynevent_cmd *cmd,
			   struct synth_field_desc *fields,
			   unsigned int n_fields)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; i < n_fields; i++) {
		/* Reject any incompletely-specified descriptor outright. */
		if (fields[i].type == NULL || fields[i].name == NULL) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_add_fields);

/**
 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the synth_event_gen_cmd_start() wrapper, which
 * automatically adds a NULL to the end of the arg list. If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end(). This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * There should be an even number of variable args, each pair
 * consisting of a type followed by a field name.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
				struct module *mod, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	/* The event name is the first arg appended, with no separator. */
	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, mod);
	for (;;) {
		const char *type, *name;

		/* A NULL in either position of a pair terminates the list. */
		type = va_arg(args, const char *);
		if (!type)
			break;
		name = va_arg(args, const char *);
		if (!name)
			break;

		if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
			ret = -EINVAL;
			break;
		}

		ret = synth_event_add_field(cmd, type, name);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);

/**
 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @name: The name of the synthetic event
 * @mod: The module creating the event, NULL if not created from a module
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 *
 * Generate a synthetic event command to be executed by
 * synth_event_gen_cmd_end(). This function can be used to generate
 * the complete command or only the first part of it; in the latter
 * case, synth_event_add_field(), synth_event_add_field_str(), or
 * synth_event_add_fields() can be used to add more fields following
 * this.
 *
 * The event fields that will be defined for the event should be
 * passed in as an array of struct synth_field_desc, and the number of
 * elements in the array passed in as n_fields. Field ordering will
 * retain the ordering given in the fields array.
 *
 * See synth_field_size() for available types. If field_name contains
 * [n] the field is considered to be an array.
 *
 * Return: 0 if successful, error otherwise.
 */
int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
				    struct module *mod,
				    struct synth_field_desc *fields,
				    unsigned int n_fields)
{
	struct dynevent_arg arg;
	unsigned int i;
	int ret = 0;

	cmd->event_name = name;
	cmd->private_data = mod;

	if (cmd->type != DYNEVENT_TYPE_SYNTH)
		return -EINVAL;

	if (n_fields > SYNTH_FIELDS_MAX)
		return -EINVAL;

	/* The event name is the first arg appended, with no separator. */
	dynevent_arg_init(&arg, 0);
	arg.str = name;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	for (i = 0; i < n_fields; i++) {
		if (fields[i].type == NULL || fields[i].name == NULL)
			return -EINVAL;

		ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);

/* Parse a raw field list and register a new synthetic event under @name. */
static int __create_synth_event(const char *name, const char *raw_fields)
{
	char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
	int consumed, cmd_version = 1, n_fields_this_loop;
	int i, argc, n_fields = 0, ret = 0;
	struct synth_event *event = NULL;

	/*
	 * Argument syntax:
	 *  - Add synthetic event: <event_name> field[;field] ...
	 *  - Remove synthetic event: !<event_name> field[;field] ...
	 *      where 'field' = type field_name
	 */

	if (name[0] == '\0') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	if (!is_good_name(name)) {
		synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
		return -EINVAL;
	}

	mutex_lock(&event_mutex);

	event = find_synth_event(name);
	if (event) {
		synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
		ret = -EEXIST;
		goto err;
	}

	/* strsep() consumes its argument, so work on a duplicate. */
	tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
	if (!tmp_fields) {
		ret = -ENOMEM;
		goto err;
	}

	while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
		argv = argv_split(GFP_KERNEL, field_str, &argc);
		if (!argv) {
			ret = -ENOMEM;
			goto err;
		}

		if (!argc) {
			argv_free(argv);
			continue;
		}

		n_fields_this_loop = 0;
		consumed = 0;
		while (argc > consumed) {
			int field_version;

			field = parse_synth_field(argc - consumed,
						  argv + consumed, &consumed,
						  &field_version);
			if (IS_ERR(field)) {
				ret = PTR_ERR(field);
				goto err_free_arg;
			}

			/*
			 * Track the highest version of any field we
			 * found in the command.
			 */
			if (field_version > cmd_version)
				cmd_version = field_version;

			/*
			 * Now sort out what is and isn't valid for
			 * each supported version.
			 *
			 * If we see more than 1 field per loop, it
			 * means we have multiple fields between
			 * semicolons, and that's something we no
			 * longer support in a version 2 or greater
			 * command.
			 */
			if (cmd_version > 1 && n_fields_this_loop >= 1) {
				synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
				ret = -EINVAL;
				goto err_free_arg;
			}

			if (n_fields == SYNTH_FIELDS_MAX) {
				synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
				ret = -EINVAL;
				goto err_free_arg;
			}
			fields[n_fields++] = field;

			n_fields_this_loop++;
		}
		argv_free(argv);

		if (consumed < argc) {
			synth_err(SYNTH_ERR_INVALID_CMD, 0);
			ret = -EINVAL;
			goto err;
		}

	}

	if (n_fields == 0) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto err;
	}

	/* alloc_synth_event() takes over ownership of the parsed fields. */
	event = alloc_synth_event(name, n_fields, fields);
	if (IS_ERR(event)) {
		ret = PTR_ERR(event);
		event = NULL;
		goto err;
	}
	ret = register_synth_event(event);
	if (!ret)
		dyn_event_add(&event->devent, &event->call);
	else
		free_synth_event(event);
out:
	mutex_unlock(&event_mutex);

	kfree(saved_fields);

	return ret;
err_free_arg:
	argv_free(argv);
err:
	/* On error the parsed fields were never handed off - free them here. */
	for (i = 0; i < n_fields; i++)
		free_synth_field(fields[i]);

	goto out;
}

/**
 * synth_event_create - Create a new synthetic event
 * @name: The name of the new synthetic event
 * @fields: An array of type/name field descriptions
 * @n_fields: The number of field descriptions contained in the fields array
 * @mod: The module creating the event, NULL if not created from a module
 *
 * Create a new synthetic event with the given name under the
 * trace/events/synthetic/ directory. The event fields that will be
 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number elements in the array passed in as
 * n_fields. Field ordering will retain the ordering given in the
 * fields array.
1455 * 1456 * If the new synthetic event is being created from a module, the mod 1457 * param must be non-NULL. This will ensure that the trace buffer 1458 * won't contain unreadable events. 1459 * 1460 * The new synth event should be deleted using synth_event_delete() 1461 * function. The new synthetic event can be generated from modules or 1462 * other kernel code using trace_synth_event() and related functions. 1463 * 1464 * Return: 0 if successful, error otherwise. 1465 */ 1466 int synth_event_create(const char *name, struct synth_field_desc *fields, 1467 unsigned int n_fields, struct module *mod) 1468 { 1469 struct dynevent_cmd cmd; 1470 char *buf; 1471 int ret; 1472 1473 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL); 1474 if (!buf) 1475 return -ENOMEM; 1476 1477 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN); 1478 1479 ret = synth_event_gen_cmd_array_start(&cmd, name, mod, 1480 fields, n_fields); 1481 if (ret) 1482 goto out; 1483 1484 ret = synth_event_gen_cmd_end(&cmd); 1485 out: 1486 kfree(buf); 1487 1488 return ret; 1489 } 1490 EXPORT_SYMBOL_GPL(synth_event_create); 1491 1492 static int destroy_synth_event(struct synth_event *se) 1493 { 1494 int ret; 1495 1496 if (se->ref) 1497 return -EBUSY; 1498 1499 if (trace_event_dyn_busy(&se->call)) 1500 return -EBUSY; 1501 1502 ret = unregister_synth_event(se); 1503 if (!ret) { 1504 dyn_event_remove(&se->devent); 1505 free_synth_event(se); 1506 } 1507 1508 return ret; 1509 } 1510 1511 /** 1512 * synth_event_delete - Delete a synthetic event 1513 * @event_name: The name of the new synthetic event 1514 * 1515 * Delete a synthetic event that was created with synth_event_create(). 1516 * 1517 * Return: 0 if successful, error otherwise. 
 */
int synth_event_delete(const char *event_name)
{
	struct synth_event *se = NULL;
	struct module *mod = NULL;
	int ret = -ENOENT;

	mutex_lock(&event_mutex);
	se = find_synth_event(event_name);
	if (se) {
		mod = se->mod;
		ret = destroy_synth_event(se);
	}
	mutex_unlock(&event_mutex);

	if (mod) {
		/*
		 * It is safest to reset the ring buffer if the module
		 * being unloaded registered any events that were
		 * used. The only worry is if a new module gets
		 * loaded, and takes on the same id as the events of
		 * this module. When printing out the buffer, traced
		 * events left over from this module may be passed to
		 * the new module events and unexpected results may
		 * occur.
		 */
		tracing_reset_all_online_cpus();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_delete);

/* Sanity-check a create command: "<name> type field[;...]" needs >= 3 words. */
static int check_command(const char *raw_command)
{
	char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
	int argc, ret = 0;

	/* strsep() consumes its argument, so work on a duplicate. */
	cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	name_and_field = strsep(&cmd, ";");
	if (!name_and_field) {
		ret = -EINVAL;
		goto free;
	}

	/* Deletion commands ("!name") carry no fields - nothing to check. */
	if (name_and_field[0] == '!')
		goto free;

	argv = argv_split(GFP_KERNEL, name_and_field, &argc);
	if (!argv) {
		ret = -ENOMEM;
		goto free;
	}
	argv_free(argv);

	if (argc < 3)
		ret = -EINVAL;
free:
	kfree(saved_cmd);

	return ret;
}

/* Handle one synthetic_events write: add ("name ...") or remove ("!name"). */
static int create_or_delete_synth_event(const char *raw_command)
{
	char *name = NULL, *fields, *p;
	int ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command so later errors can be reported with a caret. */
	last_cmd_set(raw_command);

	ret = check_command(raw_command);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	p = strpbrk(raw_command, " \t");
	if (!p && raw_command[0] != '!') {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		ret = -EINVAL;
		goto free;
	}

	name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	if (name[0] == '!') {
		ret = synth_event_delete(name + 1);
		goto free;
	}

	fields = skip_spaces(p);

	ret = __create_synth_event(name, fields);
free:
	kfree(name);

	return ret;
}

/* dynevent_cmd run callback: execute the built command, then tag the owner. */
static int synth_event_run_command(struct dynevent_cmd *cmd)
{
	struct synth_event *se;
	int ret;

	ret = create_or_delete_synth_event(cmd->seq.buffer);
	if (ret)
		return ret;

	se = find_synth_event(cmd->event_name);
	if (WARN_ON(!se))
		return -ENOENT;

	/* Record the owning module so the event can die with it. */
	se->mod = cmd->private_data;

	return ret;
}

/**
 * synth_event_cmd_init - Initialize a synthetic event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
 */
void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
			  synth_event_run_command);
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);

/* Common setup for all in-kernel trace paths: honor the enabled state. */
static inline int
__synth_event_trace_init(struct trace_event_file *file,
			 struct synth_event_trace_state *trace_state)
{
	int ret = 0;

	memset(trace_state, '\0', sizeof(*trace_state));

	/*
	 * Normal event tracing doesn't get called at all unless the
	 * ENABLED bit is set (which attaches the probe thus allowing
	 * this code to be called, etc). Because this is called
	 * directly by the user, we don't have that but we still need
	 * to honor not logging when disabled. For the iterated
	 * trace case, we save the enabled state upon start and just
	 * ignore the following data calls.
	 */
	if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file)) {
		trace_state->disabled = true;
		ret = -ENOENT;
		goto out;
	}

	trace_state->event = file->event_call->data;
out:
	return ret;
}

/* Reserve ring-buffer space for the entry plus @dynamic_fields_size bytes. */
static inline int
__synth_event_trace_start(struct trace_event_file *file,
			  struct synth_event_trace_state *trace_state,
			  int dynamic_fields_size)
{
	int entry_size, fields_size = 0;
	int ret = 0;

	fields_size = trace_state->event->n_u64 * sizeof(u64);
	fields_size += dynamic_fields_size;

	/*
	 * Avoid ring buffer recursion detection, as this event
	 * is being performed within another event.
	 */
	trace_state->buffer = file->tr->array_buffer.buffer;
	ring_buffer_nest_start(trace_state->buffer);

	entry_size = sizeof(*trace_state->entry) + fields_size;
	trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
							file,
							entry_size);
	if (!trace_state->entry) {
		/* Reservation failed - undo the nesting before bailing. */
		ring_buffer_nest_end(trace_state->buffer);
		ret = -EINVAL;
	}

	return ret;
}

/* Commit the reserved entry and leave the nested ring-buffer context. */
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	trace_event_buffer_commit(&trace_state->fbuffer);

	ring_buffer_nest_end(trace_state->buffer);
}

/**
 * synth_event_trace - Trace a synthetic event
 * @file: The trace_event_file representing the synthetic event
 * @n_vals: The number of values in vals
 * @args: Variable number of args containing the event values
 *
 * Trace a synthetic event using the values passed in the variable
 * argument list.
 *
 * The argument list should be a list 'n_vals' u64 values. The number
 * of vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
	unsigned int i, n_u64, len, data_size = 0;
	struct synth_event_trace_state state;
	va_list args;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		/* First pass: size the dynamic strings so enough space is reserved. */
		va_start(args, n_vals);

		for (i = 0; i < state.event->n_fields; i++) {
			u64 val = va_arg(args, u64);

			if (state.event->fields[i]->is_string &&
			    state.event->fields[i]->is_dynamic) {
				char *str_val = (char *)(long)val;

				data_size += strlen(str_val) + 1;
			}
		}

		va_end(args);
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	/* Second pass: copy each value into the reserved entry. */
	va_start(args, n_vals);
	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		u64 val;

		val = va_arg(args, u64);

		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)val;

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];

			switch (field->size) {
			case 1:
				state.entry->fields[n_u64].as_u8 = (u8)val;
				break;

			case 2:
				state.entry->fields[n_u64].as_u16 = (u16)val;
				break;

			case 4:
				state.entry->fields[n_u64].as_u32 = (u32)val;
				break;

			default:
				state.entry->fields[n_u64].as_u64 = val;
				break;
			}
			n_u64++;
		}
	}
	va_end(args);
out:
	/* Always commit - an -EINVAL entry was already reserved above. */
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace);

/**
 * synth_event_trace_array - Trace a synthetic event from an array
 * @file: The trace_event_file representing the synthetic event
 * @vals: Array of values
 * @n_vals: The number of values in vals
 *
 * Trace a synthetic event using the values passed in as 'vals'.
 *
 * The 'vals' array is just an array of 'n_vals' u64. The number of
 * vals must match the number of field in the synthetic event, and
 * must be in the same order as the synthetic event fields.
 *
 * All vals should be cast to u64, and string vals are just pointers
 * to strings, cast to u64. Strings will be copied into space
 * reserved in the event for the string, using these pointers.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
			    unsigned int n_vals)
{
	unsigned int i, n_u64, field_pos, len, data_size = 0;
	struct synth_event_trace_state state;
	char *str_val;
	int ret;

	ret = __synth_event_trace_init(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0; /* just disabled, not really an error */
		return ret;
	}

	if (state.event->n_dynamic_fields) {
		/* Pre-size the dynamic strings so enough space is reserved. */
		for (i = 0; i < state.event->n_dynamic_fields; i++) {
			field_pos = state.event->dynamic_fields[i]->field_pos;
			str_val = (char *)(long)vals[field_pos];
			len = strlen(str_val) + 1;
			data_size += len;
		}
	}

	ret = __synth_event_trace_start(file, &state, data_size);
	if (ret)
		return ret;

	if (n_vals != state.event->n_fields) {
		ret = -EINVAL;
		goto out;
	}

	data_size = 0;

	for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
		if (state.event->fields[i]->is_string) {
			char *str_val = (char *)(long)vals[i];

			len = trace_string(state.entry, state.event, str_val,
					   state.event->fields[i]->is_dynamic,
					   data_size, &n_u64);
			data_size += len; /* only dynamic string increments */
		} else {
			struct synth_field *field = state.event->fields[i];
			u64 val = vals[i];

			switch (field->size) {
			case 1:
				state.entry->fields[n_u64].as_u8 = (u8)val;
				break;

			case 2:
				state.entry->fields[n_u64].as_u16 = (u16)val;
				break;

			case 4:
				state.entry->fields[n_u64].as_u32 = (u32)val;
				break;

			default:
				state.entry->fields[n_u64].as_u64 = val;
				break;
			}
			n_u64++;
		}
	}
out:
	/* Always commit - an -EINVAL entry was already reserved above. */
	__synth_event_trace_end(&state);

	return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_array);

/**
 * synth_event_trace_start - Start piecewise synthetic event trace
 * @file: The trace_event_file representing the synthetic event
representing the synthetic event 1927 * @trace_state: A pointer to object tracking the piecewise trace state 1928 * 1929 * Start the trace of a synthetic event field-by-field rather than all 1930 * at once. 1931 * 1932 * This function 'opens' an event trace, which means space is reserved 1933 * for the event in the trace buffer, after which the event's 1934 * individual field values can be set through either 1935 * synth_event_add_next_val() or synth_event_add_val(). 1936 * 1937 * A pointer to a trace_state object is passed in, which will keep 1938 * track of the current event trace state until the event trace is 1939 * closed (and the event finally traced) using 1940 * synth_event_trace_end(). 1941 * 1942 * Note that synth_event_trace_end() must be called after all values 1943 * have been added for each event trace, regardless of whether adding 1944 * all field values succeeded or not. 1945 * 1946 * Note also that for a given event trace, all fields must be added 1947 * using either synth_event_add_next_val() or synth_event_add_val() 1948 * but not both together or interleaved. 1949 * 1950 * Return: 0 on success, err otherwise. 
1951 */ 1952 int synth_event_trace_start(struct trace_event_file *file, 1953 struct synth_event_trace_state *trace_state) 1954 { 1955 int ret; 1956 1957 if (!trace_state) 1958 return -EINVAL; 1959 1960 ret = __synth_event_trace_init(file, trace_state); 1961 if (ret) { 1962 if (ret == -ENOENT) 1963 ret = 0; /* just disabled, not really an error */ 1964 return ret; 1965 } 1966 1967 if (trace_state->event->n_dynamic_fields) 1968 return -ENOTSUPP; 1969 1970 ret = __synth_event_trace_start(file, trace_state, 0); 1971 1972 return ret; 1973 } 1974 EXPORT_SYMBOL_GPL(synth_event_trace_start); 1975 1976 static int __synth_event_add_val(const char *field_name, u64 val, 1977 struct synth_event_trace_state *trace_state) 1978 { 1979 struct synth_field *field = NULL; 1980 struct synth_trace_event *entry; 1981 struct synth_event *event; 1982 int i, ret = 0; 1983 1984 if (!trace_state) { 1985 ret = -EINVAL; 1986 goto out; 1987 } 1988 1989 /* can't mix add_next_synth_val() with add_synth_val() */ 1990 if (field_name) { 1991 if (trace_state->add_next) { 1992 ret = -EINVAL; 1993 goto out; 1994 } 1995 trace_state->add_name = true; 1996 } else { 1997 if (trace_state->add_name) { 1998 ret = -EINVAL; 1999 goto out; 2000 } 2001 trace_state->add_next = true; 2002 } 2003 2004 if (trace_state->disabled) 2005 goto out; 2006 2007 event = trace_state->event; 2008 if (trace_state->add_name) { 2009 for (i = 0; i < event->n_fields; i++) { 2010 field = event->fields[i]; 2011 if (strcmp(field->name, field_name) == 0) 2012 break; 2013 } 2014 if (!field) { 2015 ret = -EINVAL; 2016 goto out; 2017 } 2018 } else { 2019 if (trace_state->cur_field >= event->n_fields) { 2020 ret = -EINVAL; 2021 goto out; 2022 } 2023 field = event->fields[trace_state->cur_field++]; 2024 } 2025 2026 entry = trace_state->entry; 2027 if (field->is_string) { 2028 char *str_val = (char *)(long)val; 2029 char *str_field; 2030 2031 if (field->is_dynamic) { /* add_val can't do dynamic strings */ 2032 ret = -EINVAL; 2033 goto out; 
2034 } 2035 2036 if (!str_val) { 2037 ret = -EINVAL; 2038 goto out; 2039 } 2040 2041 str_field = (char *)&entry->fields[field->offset]; 2042 strscpy(str_field, str_val, STR_VAR_LEN_MAX); 2043 } else { 2044 switch (field->size) { 2045 case 1: 2046 trace_state->entry->fields[field->offset].as_u8 = (u8)val; 2047 break; 2048 2049 case 2: 2050 trace_state->entry->fields[field->offset].as_u16 = (u16)val; 2051 break; 2052 2053 case 4: 2054 trace_state->entry->fields[field->offset].as_u32 = (u32)val; 2055 break; 2056 2057 default: 2058 trace_state->entry->fields[field->offset].as_u64 = val; 2059 break; 2060 } 2061 } 2062 out: 2063 return ret; 2064 } 2065 2066 /** 2067 * synth_event_add_next_val - Add the next field's value to an open synth trace 2068 * @val: The value to set the next field to 2069 * @trace_state: A pointer to object tracking the piecewise trace state 2070 * 2071 * Set the value of the next field in an event that's been opened by 2072 * synth_event_trace_start(). 2073 * 2074 * The val param should be the value cast to u64. If the value points 2075 * to a string, the val param should be a char * cast to u64. 2076 * 2077 * This function assumes all the fields in an event are to be set one 2078 * after another - successive calls to this function are made, one for 2079 * each field, in the order of the fields in the event, until all 2080 * fields have been set. If you'd rather set each field individually 2081 * without regard to ordering, synth_event_add_val() can be used 2082 * instead. 2083 * 2084 * Note however that synth_event_add_next_val() and 2085 * synth_event_add_val() can't be intermixed for a given event trace - 2086 * one or the other but not both can be used at the same time. 2087 * 2088 * Note also that synth_event_trace_end() must be called after all 2089 * values have been added for each event trace, regardless of whether 2090 * adding all field values succeeded or not. 2091 * 2092 * Return: 0 on success, err otherwise. 
 */
int synth_event_add_next_val(u64 val,
			     struct synth_event_trace_state *trace_state)
{
	/* NULL field name selects positional ("next field") mode. */
	return __synth_event_add_val(NULL, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_next_val);

/**
 * synth_event_add_val - Add a named field's value to an open synth trace
 * @field_name: The name of the synthetic event field value to set
 * @val: The value to set the named field to
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * Set the value of the named field in an event that's been opened by
 * synth_event_trace_start().
 *
 * The val param should be the value cast to u64. If the value points
 * to a string, the val param should be a char * cast to u64.
 *
 * This function looks up the field name, and if found, sets the field
 * to the specified value. This lookup makes this function more
 * expensive than synth_event_add_next_val(), so use that or the
 * none-piecewise synth_event_trace() instead if efficiency is more
 * important.
 *
 * Note however that synth_event_add_next_val() and
 * synth_event_add_val() can't be intermixed for a given event trace -
 * one or the other but not both can be used at the same time.
 *
 * Note also that synth_event_trace_end() must be called after all
 * values have been added for each event trace, regardless of whether
 * adding all field values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_add_val(const char *field_name, u64 val,
			struct synth_event_trace_state *trace_state)
{
	/* Non-NULL field name selects named-field mode. */
	return __synth_event_add_val(field_name, val, trace_state);
}
EXPORT_SYMBOL_GPL(synth_event_add_val);

/**
 * synth_event_trace_end - End piecewise synthetic event trace
 * @trace_state: A pointer to object tracking the piecewise trace state
 *
 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
 *
 * This function 'closes' an event trace, which basically means that
 * it commits the reserved event and cleans up other loose ends.
 *
 * A pointer to a trace_state object is passed in, which will keep
 * track of the current event trace state opened with
 * synth_event_trace_start().
 *
 * Note that this function must be called after all values have been
 * added for each event trace, regardless of whether adding all field
 * values succeeded or not.
 *
 * Return: 0 on success, err otherwise.
 */
int synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
	if (!trace_state)
		return -EINVAL;

	__synth_event_trace_end(trace_state);

	return 0;
}
EXPORT_SYMBOL_GPL(synth_event_trace_end);

/* dyn_event 'create' callback: handles "s:[synthetic/]<name> field[;field]". */
static int create_synth_event(const char *raw_command)
{
	char *fields, *p;
	const char *name;
	int len, ret = 0;

	raw_command = skip_spaces(raw_command);
	if (raw_command[0] == '\0')
		return ret;

	/* Remember the command so later errors can be reported with a caret. */
	last_cmd_set(raw_command);

	name = raw_command;

	/* Don't try to process if not our system */
	if (name[0] != 's' || name[1] != ':')
		return -ECANCELED;
	name += 2;

	p = strpbrk(raw_command, " \t");
	if (!p) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return -EINVAL;
	}

	fields = skip_spaces(p);

	/* This interface accepts group name prefix */
	if (strchr(name, '/')) {
		len = str_has_prefix(name, SYNTH_SYSTEM "/");
		if (len == 0) {
			synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
			return -EINVAL;
		}
		name += len;
	}

	len = name - raw_command;

	ret = check_command(raw_command + len);
	if (ret) {
		synth_err(SYNTH_ERR_INVALID_CMD, 0);
		return ret;
	}

	name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	ret = __create_synth_event(name, fields);

	kfree(name);

	return ret;
}

/* dyn_event 'free' callback: remove and free an event that isn't in use. */
static int synth_event_release(struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);
	int ret;

	if (event->ref)
		return -EBUSY;

	if (trace_event_dyn_busy(&event->call))
		return -EBUSY;

	ret = unregister_synth_event(event);
	if (ret)
		return ret;

	dyn_event_remove(ev);
	free_synth_event(event);
	return 0;
}

/* Print "name<tab>type field; ..." for one event (shared by both show paths). */
static int __synth_event_show(struct seq_file *m, struct synth_event *event)
{
	struct synth_field *field;
	unsigned int i;
	char *type, *t;

	seq_printf(m, "%s\t", event->name);

	for (i = 0; i < event->n_fields; i++) {
		field = event->fields[i];

		type = field->type;
		t = strstr(type, "__data_loc");
		if (t) { /* __data_loc belongs in format but not event desc */
			t += sizeof("__data_loc");
			type = t;
		}

		/* parameter values */
		seq_printf(m, "%s %s%s", type, field->name,
			   i == event->n_fields - 1 ? "" : "; ");
	}

	seq_putc(m, '\n');

	return 0;
}

/* dyn_event 'show' callback: includes the "s:system/" prefix. */
static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct synth_event *event = to_synth_event(ev);

	seq_printf(m, "s:%s/", event->class.system);

	return __synth_event_show(m, event);
}

/* synthetic_events file: show only our events from the shared dyn_event list. */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}

static const struct seq_operations synth_events_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= synth_events_seq_show,
};

static int synth_events_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	/* O_TRUNC on a writable open clears all existing synthetic events. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&synth_event_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &synth_events_seq_op);
}

static ssize_t synth_events_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_synth_event);
}

static const struct file_operations synth_events_fops = {
	.open           = synth_events_open,
	.write		= synth_events_write,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

/*
 * Register dynevent at core_initcall. This allows kernel to setup kprobe
 * events in postcore_initcall without tracefs.
 */
static __init int trace_events_synth_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err)
		pr_warn("Could not register synth_event_ops\n");

	return err;
}
core_initcall(trace_events_synth_init_early);

/* Create the tracefs 'synthetic_events' control file once tracefs is up. */
static __init int trace_events_synth_init(void)
{
	struct dentry *entry = NULL;
	int err = 0;
	err = tracing_init_dentry();
	if (err)
		goto err;

	entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
				    NULL, NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");

	return err;
}

fs_initcall(trace_events_synth_init);