// SPDX-License-Identifier: GPL-2.0
/*
 * event probes
 *
 * Part of this code was copied from kernel/trace/trace_kprobe.c written by
 * Masami Hiramatsu <mhiramat@kernel.org>
 *
 * Copyright (C) 2021, VMware Inc, Steven Rostedt <rostedt@goodmis.org>
 * Copyright (C) 2021, VMware Inc, Tzvetomir Stoyanov <tz.stoyanov@gmail.com>
 *
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define EPROBE_EVENT_SYSTEM "eprobes"

struct trace_eprobe {
	/* tracepoint system */
	const char *event_system;

	/* tracepoint event */
	const char *event_name;

	struct trace_event_call *event;

	struct dyn_event	devent;
	struct trace_probe	tp;
};

struct eprobe_data {
	struct trace_event_file	*file;
	struct trace_eprobe	*ep;
};

static int __trace_eprobe_create(int argc, const char *argv[]);

static void trace_event_probe_cleanup(struct trace_eprobe *ep)
{
	if (!ep)
		return;
	trace_probe_cleanup(&ep->tp);
	kfree(ep->event_name);
	kfree(ep->event_system);
	if (ep->event)
		trace_event_put_ref(ep->event);
	kfree(ep);
}

static struct trace_eprobe *to_trace_eprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_eprobe, devent);
}

static int eprobe_dyn_event_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_eprobe_create);
}

static int eprobe_dyn_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int i;

	seq_printf(m, "e:%s/%s", trace_probe_group_name(&ep->tp),
				trace_probe_name(&ep->tp));
	seq_printf(m, " %s.%s", ep->event_system, ep->event_name);

	for (i = 0; i < ep->tp.nr_args; i++)
		seq_printf(m, " %s=%s", ep->tp.args[i].name, ep->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static int unregister_trace_eprobe(struct trace_eprobe *ep)
{
	/* If other probes are on the event, just unregister eprobe */
	if (trace_probe_has_sibling(&ep->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&ep->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (trace_probe_unregister_event_call(&ep->tp))
		return -EBUSY;

unreg:
	dyn_event_remove(&ep->devent);
	trace_probe_unlink(&ep->tp);

	return 0;
}

static int eprobe_dyn_event_release(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);
	int ret = unregister_trace_eprobe(ep);

	if (!ret)
		trace_event_probe_cleanup(ep);
	return ret;
}

static bool eprobe_dyn_event_is_busy(struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);

	return trace_probe_is_enabled(&ep->tp);
}

static bool eprobe_dyn_event_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_eprobe *ep = to_trace_eprobe(ev);

	return strcmp(trace_probe_name(&ep->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&ep->tp), system) == 0) &&
	    trace_probe_match_command_args(&ep->tp, argc, argv);
}

static struct dyn_event_operations eprobe_dyn_event_ops = {
	.create = eprobe_dyn_event_create,
	.show = eprobe_dyn_event_show,
	.is_busy = eprobe_dyn_event_is_busy,
	.free = eprobe_dyn_event_release,
	.match = eprobe_dyn_event_match,
};
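/*
 * Illustrative usage (the probe name, event and field below are examples
 * only): with the ops above registered, an event probe is created, listed
 * and removed through the tracefs "dynamic_events" file:
 *
 *	# echo 'e:myprobe sched.sched_switch comm=$next_comm:string' >> dynamic_events
 *	# cat dynamic_events
 *	e:eprobes/myprobe sched.sched_switch comm=$next_comm:string
 *	# echo '-:eprobes/myprobe' >> dynamic_events
 *
 * When no group is given, the probe lands in the default
 * EPROBE_EVENT_SYSTEM group ("eprobes").
 */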
static struct trace_eprobe *alloc_event_probe(const char *group,
					      const char *this_event,
					      struct trace_event_call *event,
					      int nargs)
{
	struct trace_eprobe *ep;
	const char *event_name;
	const char *sys_name;
	int ret = -ENOMEM;

	if (!event)
		return ERR_PTR(-ENODEV);

	sys_name = event->class->system;
	event_name = trace_event_name(event);

	ep = kzalloc(struct_size(ep, tp.args, nargs), GFP_KERNEL);
	if (!ep) {
		trace_event_put_ref(event);
		goto error;
	}
	ep->event = event;
	ep->event_name = kstrdup(event_name, GFP_KERNEL);
	if (!ep->event_name)
		goto error;
	ep->event_system = kstrdup(sys_name, GFP_KERNEL);
	if (!ep->event_system)
		goto error;

	ret = trace_probe_init(&ep->tp, this_event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&ep->devent, &eprobe_dyn_event_ops);
	return ep;
error:
	trace_event_probe_cleanup(ep);
	return ERR_PTR(ret);
}

static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
{
	struct probe_arg *parg = &ep->tp.args[i];
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(ep->event);
	list_for_each_entry(field, head, link) {
		if (!strcmp(parg->code->data, field->name)) {
			kfree(parg->code->data);
			parg->code->data = field;
			return 0;
		}
	}
	kfree(parg->code->data);
	parg->code->data = NULL;
	return -ENOENT;
}

static int eprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret;
	struct eprobe_trace_entry_head field;
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(event_call);
	if (WARN_ON_ONCE(!tp))
		return -ENOENT;

	DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0);

	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}

static struct trace_event_fields eprobe_fields_array[] = {
	{ .type = TRACE_FUNCTION_TYPE,
	  .define_fields = eprobe_event_define_fields },
	{}
};

/* Event entry printers */
static enum print_line_t
print_eprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct eprobe_trace_entry_head *field;
	struct trace_event_call *pevent;
	struct trace_event *probed_event;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;

	field = (struct eprobe_trace_entry_head *)iter->ent;
	tp = trace_probe_primary_from_call(
			container_of(event, struct trace_event_call, event));
	if (WARN_ON_ONCE(!tp))
		goto out;

	trace_seq_printf(s, "%s: (", trace_probe_name(tp));

	probed_event = ftrace_find_event(field->type);
	if (probed_event) {
		pevent = container_of(probed_event, struct trace_event_call, event);
		trace_seq_printf(s, "%s.%s", pevent->class->system,
				 trace_event_name(pevent));
	} else {
		trace_seq_printf(s, "%u", field->type);
	}

	trace_seq_putc(s, ')');

	if (print_probe_args(s, tp->args, tp->nr_args,
			     (u8 *)&field[1], field) < 0)
		goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}
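/*
 * With the printer above, a hit in the trace output is rendered roughly
 * as (illustrative values, for an eprobe named "myprobe" attached to
 * sched.sched_switch with one argument "comm"):
 *
 *	myprobe: (sched.sched_switch) comm="bash"
 *
 * If the probed event can no longer be found, its raw type number is
 * printed in place of "system.event".
 */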
static unsigned long get_event_field(struct fetch_insn *code, void *rec)
{
	struct ftrace_event_field *field = code->data;
	unsigned long val;
	void *addr;

	addr = rec + field->offset;

	switch (field->size) {
	case 1:
		if (field->is_signed)
			val = *(char *)addr;
		else
			val = *(unsigned char *)addr;
		break;
	case 2:
		if (field->is_signed)
			val = *(short *)addr;
		else
			val = *(unsigned short *)addr;
		break;
	case 4:
		if (field->is_signed)
			val = *(int *)addr;
		else
			val = *(unsigned int *)addr;
		break;
	default:
		if (field->is_signed)
			val = *(long *)addr;
		else
			val = *(unsigned long *)addr;
		break;
	}
	return val;
}

static int get_eprobe_size(struct trace_probe *tp, void *rec)
{
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (unlikely(arg->dynamic)) {
			unsigned long val;

			val = get_event_field(arg->code, rec);
			len = process_fetch_insn_bottom(arg->code + 1, val, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

/* Event probe specific fetch functions */

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	unsigned long val;

	val = get_event_field(code, rec);
	return process_fetch_insn_bottom(code + 1, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

/* Return the length of string -- including null terminating byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	const void __user *uaddr =  (__force const void __user *)addr;

	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}

/* Return the length of string -- including null terminating byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if (addr < TASK_SIZE)
		return fetch_store_strlen_user(addr);
#endif

	do {
		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}
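/*
 * Note on the convention used by the two string fetchers below (see the
 * data_loc helpers in trace_probe.h): the u32 at *dest packs the dynamic
 * data location as (length << 16) | offset, where the offset is relative
 * to the entry base. get_loc_len()/get_loc_data() unpack the caller's
 * value, and make_data_loc() stores the final one once the string has
 * been copied.
 */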
/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr =  (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Try to get the string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	const void __user *uaddr =  (__force const void __user *)src;

	return copy_from_user_nofault(dest, uaddr, size);
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}

/* eprobe handler */
static inline void
__eprobe_trace_func(struct eprobe_data *edata, void *rec)
{
	struct eprobe_trace_entry_head *entry;
	struct trace_event_call *call = trace_probe_event_call(&edata->ep->tp);
	struct trace_event_buffer fbuffer;
	int dsize;

	if (WARN_ON_ONCE(call != edata->file->event_call))
		return;

	if (trace_trigger_soft_disabled(edata->file))
		return;

	fbuffer.trace_ctx = tracing_gen_ctx();
	fbuffer.trace_file = edata->file;

	dsize = get_eprobe_size(&edata->ep->tp, rec);
	fbuffer.regs = NULL;

	fbuffer.event =
		trace_event_buffer_lock_reserve(&fbuffer.buffer, edata->file,
					call->event.type,
					sizeof(*entry) + edata->ep->tp.size + dsize,
					fbuffer.trace_ctx);
	if (!fbuffer.event)
		return;

	entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
	if (edata->ep->event)
		entry->type = edata->ep->event->event.type;
	else
		entry->type = 0;
	store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);

	trace_event_buffer_commit(&fbuffer);
}
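/*
 * Layout of a recorded eprobe entry, matching the size reserved above
 * (sizeof(*entry) + tp.size + dsize):
 *
 *	+----------------------------------------+ <- entry
 *	| eprobe_trace_entry_head (event type)    |
 *	+----------------------------------------+ <- &entry[1]
 *	| fixed-size argument values (tp.size)    |
 *	+----------------------------------------+
 *	| dynamic data such as strings (dsize)    |
 *	+----------------------------------------+
 */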
/*
 * The event probe implementation uses event triggers to get access to
 * the event it is attached to, but is not an actual trigger. The below
 * functions are just stubs to fulfill what is needed to use the trigger
 * infrastructure.
 */
static int eprobe_trigger_init(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	return 0;
}

static void eprobe_trigger_free(struct event_trigger_ops *ops,
				struct event_trigger_data *data)
{

}

static int eprobe_trigger_print(struct seq_file *m,
				struct event_trigger_ops *ops,
				struct event_trigger_data *data)
{
	/* Do not print eprobe event triggers */
	return 0;
}

static void eprobe_trigger_func(struct event_trigger_data *data,
				struct trace_buffer *buffer, void *rec,
				struct ring_buffer_event *rbe)
{
	struct eprobe_data *edata = data->private_data;

	__eprobe_trace_func(edata, rec);
}

static struct event_trigger_ops eprobe_trigger_ops = {
	.func			= eprobe_trigger_func,
	.print			= eprobe_trigger_print,
	.init			= eprobe_trigger_init,
	.free			= eprobe_trigger_free,
};

static int eprobe_trigger_cmd_func(struct event_command *cmd_ops,
				   struct trace_event_file *file,
				   char *glob, char *cmd, char *param)
{
	return -1;
}

static int eprobe_trigger_reg_func(char *glob, struct event_trigger_ops *ops,
				   struct event_trigger_data *data,
				   struct trace_event_file *file)
{
	return -1;
}

static void eprobe_trigger_unreg_func(char *glob, struct event_trigger_ops *ops,
				      struct event_trigger_data *data,
				      struct trace_event_file *file)
{

}

static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd,
							char *param)
{
	return &eprobe_trigger_ops;
}

static struct event_command event_trigger_cmd = {
	.name			= "eprobe",
	.trigger_type		= ETT_EVENT_EPROBE,
	.flags			= EVENT_CMD_FL_NEEDS_REC,
	.func			= eprobe_trigger_cmd_func,
	.reg			= eprobe_trigger_reg_func,
	.unreg			= eprobe_trigger_unreg_func,
	.unreg_all		= NULL,
	.get_trigger_ops	= eprobe_trigger_get_ops,
	.set_filter		= NULL,
};
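/*
 * Note: eprobe triggers are attached by enable_eprobe() below rather than
 * through a per-event "trigger" file, so the command and registration
 * stubs above return -1 to fail any path that would try to use them as a
 * regular trigger command.
 */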
static struct event_trigger_data *
new_eprobe_trigger(struct trace_eprobe *ep, struct trace_event_file *file)
{
	struct event_trigger_data *trigger;
	struct eprobe_data *edata;

	edata = kzalloc(sizeof(*edata), GFP_KERNEL);
	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
	if (!trigger || !edata) {
		kfree(edata);
		kfree(trigger);
		return ERR_PTR(-ENOMEM);
	}

	trigger->flags = EVENT_TRIGGER_FL_PROBE;
	trigger->count = -1;
	trigger->ops = &eprobe_trigger_ops;

	/*
	 * EVENT PROBE triggers are not registered as commands with
	 * register_event_command(), as they are not controlled by the user
	 * from the trigger file
	 */
	trigger->cmd_ops = &event_trigger_cmd;

	INIT_LIST_HEAD(&trigger->list);
	RCU_INIT_POINTER(trigger->filter, NULL);

	edata->file = file;
	edata->ep = ep;
	trigger->private_data = edata;

	return trigger;
}

static int enable_eprobe(struct trace_eprobe *ep,
			 struct trace_event_file *eprobe_file)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct trace_array *tr = eprobe_file->tr;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;
	trigger = new_eprobe_trigger(ep, eprobe_file);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	list_add_tail_rcu(&trigger->list, &file->triggers);

	trace_event_trigger_enable_disable(file, 1);
	update_cond_flag(file);

	return 0;
}

static struct trace_event_functions eprobe_funcs = {
	.trace		= print_eprobe_event
};

static int disable_eprobe(struct trace_eprobe *ep,
			  struct trace_array *tr)
{
	struct event_trigger_data *trigger;
	struct trace_event_file *file;
	struct eprobe_data *edata;

	file = find_event_file(tr, ep->event_system, ep->event_name);
	if (!file)
		return -ENOENT;

	list_for_each_entry(trigger, &file->triggers, list) {
		if (!(trigger->flags & EVENT_TRIGGER_FL_PROBE))
			continue;
		edata = trigger->private_data;
		if (edata->ep == ep)
			break;
	}
	if (list_entry_is_head(trigger, &file->triggers, list))
		return -ENODEV;

	list_del_rcu(&trigger->list);

	trace_event_trigger_enable_disable(file, 0);
	update_cond_flag(file);
	return 0;
}
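/*
 * Several eprobes may share a single trace_probe event (they are linked
 * as siblings), so the enable/disable paths below walk
 * trace_probe_probe_list() and attach or detach a trigger for every
 * sibling, rolling back on partial failure.
 */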
static int enable_trace_eprobe(struct trace_event_call *call,
			       struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_eprobe *ep;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		ep = container_of(pos, struct trace_eprobe, tp);
		ret = enable_eprobe(ep, file);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			disable_eprobe(ep, file->tr);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}

static int disable_trace_eprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *pos, *tp;
	struct trace_eprobe *ep;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp)) {
		list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
			ep = container_of(pos, struct trace_eprobe, tp);
			disable_eprobe(ep, file->tr);
		}
	}

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For a perf
		 * event, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we do not need to handle that case here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

static int eprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_eprobe(event, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_eprobe(event, file);
#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static inline void init_trace_eprobe_call(struct trace_eprobe *ep)
{
	struct trace_event_call *call = trace_probe_event_call(&ep->tp);

	call->flags = TRACE_EVENT_FL_EPROBE;
	call->event.funcs = &eprobe_funcs;
	call->class->fields_array = eprobe_fields_array;
	call->class->reg = eprobe_register;
}

static struct trace_event_call *
find_and_get_event(const char *system, const char *event_name)
{
	struct trace_event_call *tp_event;
	const char *name;

	list_for_each_entry(tp_event, &ftrace_events, list) {
		/* Skip other probes and ftrace events */
		if (tp_event->flags &
		    (TRACE_EVENT_FL_IGNORE_ENABLE |
		     TRACE_EVENT_FL_KPROBE |
		     TRACE_EVENT_FL_UPROBE |
		     TRACE_EVENT_FL_EPROBE))
			continue;
		if (!tp_event->class->system ||
		    strcmp(system, tp_event->class->system))
			continue;
		name = trace_event_name(tp_event);
		if (!name || strcmp(event_name, name))
			continue;
		/* Could not take a reference; the event is going away */
		if (!trace_event_try_get_ref(tp_event))
			return NULL;
		return tp_event;
	}
	return NULL;
}

static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[], int i)
{
	unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_TPOINT;
	int ret;

	ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], flags);
	if (ret)
		return ret;

	if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG)
		ret = trace_eprobe_tp_arg_update(ep, i);

	return ret;
}
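/*
 * A "$field" argument parses to a FETCH_OP_TP_ARG instruction whose data
 * initially holds the field name as a string; trace_eprobe_tp_arg_update()
 * replaces that string with the matching ftrace_event_field so that
 * get_event_field() can read the value straight out of the probed event's
 * record at trace time.
 */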
static int __trace_eprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *	e[:[GRP/]ENAME] SYSTEM.EVENT [FETCHARGS]
	 * Fetch args:
	 *	<name>=$<field>[:TYPE]
	 */
	const char *event = NULL, *group = EPROBE_EVENT_SYSTEM;
	const char *sys_event = NULL, *sys_name = NULL;
	struct trace_event_call *event_call;
	struct trace_eprobe *ep = NULL;
	char buf1[MAX_EVENT_NAME_LEN];
	char buf2[MAX_EVENT_NAME_LEN];
	int ret = 0;
	int i;

	if (argc < 2 || argv[0][0] != 'e')
		return -ECANCELED;

	trace_probe_log_init("event_probe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event) {
		event++;
		ret = traceprobe_parse_event_name(&event, &group, buf1,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		strscpy(buf1, argv[1], MAX_EVENT_NAME_LEN);
		sanitize_event_name(buf1);
		event = buf1;
	}
	if (!is_good_name(event) || !is_good_name(group))
		goto parse_error;

	sys_event = argv[1];
	ret = traceprobe_parse_event_name(&sys_event, &sys_name, buf2,
					  sys_event - argv[1]);
	if (ret || !sys_name)
		goto parse_error;
	if (!is_good_name(sys_event) || !is_good_name(sys_name))
		goto parse_error;

	mutex_lock(&event_mutex);
	event_call = find_and_get_event(sys_name, sys_event);
	ep = alloc_event_probe(group, event, event_call, argc - 2);
	mutex_unlock(&event_mutex);

	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		/* This must return -ENOMEM or -ENODEV (missing event), else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM && ret != -ENODEV);
		ep = NULL;
		goto error;
	}

	argc -= 2; argv += 2;
	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = trace_eprobe_tp_update_arg(ep, argv, i);
		if (ret)
			goto error;
	}
	ret = traceprobe_set_print_fmt(&ep->tp, PROBE_PRINT_EVENT);
	if (ret < 0)
		goto error;
	init_trace_eprobe_call(ep);
	mutex_lock(&event_mutex);
	ret = trace_probe_register_event_call(&ep->tp);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		}
		mutex_unlock(&event_mutex);
		goto error;
	}
	ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
	mutex_unlock(&event_mutex);
	return ret;
parse_error:
	ret = -EINVAL;
error:
	trace_event_probe_cleanup(ep);
	return ret;
}

/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * eprobe events in postcore_initcall without tracefs.
 */
static __init int trace_events_eprobe_init_early(void)
{
	int err = 0;

	err = dyn_event_register(&eprobe_dyn_event_ops);
	if (err)
		pr_warn("Could not register eprobe_dyn_event_ops\n");

	return err;
}
core_initcall(trace_events_eprobe_init_early);