// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <uapi/linux/user_events.h>
#include "trace.h"
#include "trace_dynevent.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/*
 * Limits how many trace_event calls user processes can create:
 * Must be a multiple of PAGE_SIZE.
 */
#define MAX_PAGES 1
#define MAX_EVENTS (MAX_PAGES * PAGE_SIZE)

/* Limits how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024
#define MAX_FIELD_ARG_NAME 256

static char *register_page_data;

static DEFINE_MUTEX(reg_mutex);
static DEFINE_HASHTABLE(register_table, 4);
static DECLARE_BITMAP(page_bitmap, MAX_EVENTS);

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event may be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. They
 * cannot go away until the refcnt reaches zero.
 */
struct user_event {
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	atomic_t refcnt;
	int index;
	int flags;
};

/*
 * Stores per-file event references. As users register events within a
 * file, this structure is modified and freed via RCU. The lifetime of
 * this struct is tied to the lifetime of the file. These are not shared
 * and are only accessible by the file that created them.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

typedef void (*user_event_func_t) (struct user_event *user,
				   void *data, u32 datalen,
				   void *tpdata);

static int user_event_parse(char *name, char *args, char *flags,
			    struct user_event **newuser);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}
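/*
 * Illustrative register strings and how user_event_parse_cmd() below
 * splits them ('test' is a made-up event name used only as an example):
 *
 *	"test char[20] msg;unsigned int id"
 *	  name = "test", flags = NULL, args = "char[20] msg;unsigned int id"
 *
 *	"test:BPF_ITER u32 count"
 *	  name = "test", flags = "BPF_ITER", args = "u32 count"
 */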
/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common property
 * sizes to the offset for the user.
 */
static int user_event_parse_cmd(char *raw_command, struct user_event **newuser)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(name, args, flags, newuser);
}

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmalloc(sizeof(*field), GFP_KERNEL);

	if (!field)
		return -ENOMEM;

	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	list_add(&field->link, &user->fields);

	return 0;
}
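/*
 * Illustrative field descriptions and how user_event_parse_field() below
 * resolves them ('my_struct' is a made-up name; basic type sizes assume
 * common ABIs where int is 32-bit):
 *
 *	"u32 count"              type = "u32", name = "count", size = 4
 *	"unsigned int id"        type = "unsigned int", name = "id", size = 4
 *	"struct my_struct x 20"  type = "struct my_struct", name = "x",
 *	                         size = 20 (structs need an explicit size)
 */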
/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}

static void user_event_parse_flags(struct user_event *user, char *flags)
{
	char *flag;

	if (flags == NULL)
		return;

	while ((flag = strsep(&flags, ",")) != NULL) {
		if (strcmp(flag, "BPF_ITER") == 0)
			user->flags |= FLAG_BPF_ITER;
	}
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown type, likely a struct; allowed, treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}
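/*
 * For the example event "test char[20] msg;unsigned int id", the
 * print_fmt built below comes out as:
 *
 *	"msg=%s id=%u", REC->msg, REC->id
 *
 * Fields print in registration order: the fields list is prepended on
 * add and walked in reverse here.
 */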
#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	/* Unsafe to try to decode user provided print_fmt, use hex */
	trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
				 1, iter->ent, iter->ent_size, true);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = trace_remove_event_call(&user->call);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);

	register_page_data[user->index] = 0;
	clear_bit(user->index, page_bitmap);
	hash_del(&user->node);

	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	return ret;
}

static struct user_event *find_user_event(char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name))
			return user;

	return NULL;
}
/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, void *data, u32 datalen,
			      void *tpdata)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file,
					   sizeof(*entry) + datalen);

	if (unlikely(!entry))
		return;

	memcpy(entry + 1, data, datalen);

	trace_event_buffer_commit(&event_buffer);
}

/*
 * Update the register page that is shared between user processes.
 */
static void update_reg_page_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	register_page_data[user->index] = status;
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

	default:
		break;
	}

	return ret;
inc:
	atomic_inc(&user->refcnt);
	update_reg_page_for(user);
	return 0;
dec:
	update_reg_page_for(user);
	atomic_dec(&user->refcnt);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	mutex_lock(&reg_mutex);
	ret = user_event_parse_cmd(name, &user);
	mutex_unlock(&reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}
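/*
 * With the example event above registered, user_event_show() below
 * emits the following line via the dynamic_events tracefs file
 * (assuming USER_EVENTS_PREFIX is "u:"):
 *
 *	u:test char[20] msg; unsigned int id
 */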
static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return atomic_read(&user->refcnt) != 0;
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (atomic_read(&user->refcnt) != 0)
		return -EBUSY;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name, *arg_name;
	int len, pos, i = *iout;
	bool colon = false, match = false;

	if (i >= argc)
		return false;

	len = MAX_FIELD_ARG_NAME;
	field_name = kmalloc(len, GFP_KERNEL);
	arg_name = kmalloc(len, GFP_KERNEL);

	if (!arg_name || !field_name)
		goto out;

	pos = 0;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(arg_name + pos, len - pos, " ");

		/* Use "%s" so user provided strings cannot act as formats */
		pos += snprintf(arg_name + pos, len - pos, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			colon = true;
			break;
		}
	}

	pos = 0;

	pos += snprintf(field_name + pos, len - pos, "%s", field->type);
	pos += snprintf(field_name + pos, len - pos, " ");
	pos += snprintf(field_name + pos, len - pos, "%s", field->name);

	if (colon)
		pos += snprintf(field_name + pos, len - pos, ";");

	*iout = i;

	match = strcmp(arg_name, field_name) == 0;
out:
	kfree(arg_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}

static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);

	return match;
}

static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = trace_add_event_call(&user->call);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}
/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 */
static int user_event_parse(char *name, char *args, char *flags,
			    struct user_event **newuser)
{
	int ret;
	int index;
	u32 key;
	struct user_event *user = find_user_event(name, &key);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by caller, free it since it already exists.
		 * Caller only worries about failure cases for freeing.
		 */
		kfree(name);
		return 0;
	}

	index = find_first_zero_bit(page_bitmap, MAX_EVENTS);

	if (index == MAX_EVENTS)
		return -EMFILE;

	user = kzalloc(sizeof(*user), GFP_KERNEL);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);

	user->tracepoint.name = name;

	user_event_parse_flags(user, flags);

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;

	user->class.system = USER_EVENTS_SYSTEM;
	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;

	mutex_lock(&event_mutex);
	ret = user_event_trace_register(user);
	mutex_unlock(&event_mutex);

	if (ret)
		goto put_user;

	user->index = index;
	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	set_bit(user->index, page_bitmap);
	hash_add(register_table, &user->node, key);

	*newuser = user;
	return 0;
put_user:
	user_event_destroy_fields(user);
	kfree(user);
	return ret;
}

/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(char *name)
{
	u32 key;
	int ret;
	struct user_event *user = find_user_event(name, &key);

	if (!user)
		return -ENOENT;

	if (atomic_read(&user->refcnt) != 0)
		return -EBUSY;

	mutex_lock(&event_mutex);
	ret = destroy_user_event(user);
	mutex_unlock(&event_mutex);

	return ret;
}
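/*
 * The payload validated below starts with the int write index returned
 * by the DIAG_IOCSREG ioctl, followed by the field data in registration
 * order. A hypothetical user-space emitter for the example event
 * "test char[20] msg;unsigned int id" (reg and data_fd come from the
 * registration sketch further below):
 *
 *	struct {
 *		int write_index;
 *		char msg[20];
 *		unsigned int id;
 *	} payload = { reg.write_index, "hello", 1 };
 *
 *	write(data_fd, &payload, sizeof(payload));
 */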
/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(file->private_data);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled disables after this check, however
	 * we don't mind if a few events are included in this condition.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		void *tpdata;
		void *kdata;
		u32 datalen;

		kdata = kmalloc(i->count, GFP_KERNEL);

		if (unlikely(!kdata))
			return -ENOMEM;

		datalen = copy_from_iter(kdata, i->count, i);

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, kdata, datalen, tpdata);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		kfree(kdata);
	}

	return ret;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(READ, (char *)ubuf, count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct file *file, struct user_event *user)
{
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(file->private_data,
					 lockdep_is_held(&reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user;

	atomic_inc(&user->refcnt);

	rcu_assign_pointer(file->private_data, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	return copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
}

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct file *file, unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	char *name;
	long ret;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(name, &user);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(file, user);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	put_user((u32)ret, &ureg->write_index);
	put_user(user->index, &ureg->status_index);

	return 0;
}
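/*
 * A hypothetical user-space registration against user_events_data,
 * assuming the uapi struct user_reg layout used above (size, name_args,
 * status_index, write_index) and a tracefs mount at /sys/kernel/tracing
 * (the path may vary). On success, reg.write_index heads each write()
 * payload and reg.status_index selects the byte to watch in the status
 * page:
 *
 *	struct user_reg reg = { 0 };
 *
 *	reg.size = sizeof(reg);
 *	reg.name_args = (__u64)(uintptr_t)
 *		"test char[20] msg;unsigned int id";
 *
 *	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *	int ret = ioctl(data_fd, DIAG_IOCSREG, &reg);
 */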
/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct file *file, unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	ret = delete_user_event(name);

	kfree(name);

	return ret;
}

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&reg_mutex);
		ret = user_events_ioctl_reg(file, uarg);
		mutex_unlock(&reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&reg_mutex);
		ret = user_events_ioctl_del(file, uarg);
		mutex_unlock(&reg_mutex);
		break;
	}

	return ret;
}

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_refs *refs;
	struct user_event *user;
	int i;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&reg_mutex);

	refs = file->private_data;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i) {
		user = refs->events[i];

		if (user)
			atomic_dec(&user->refcnt);
	}
out:
	file->private_data = NULL;

	mutex_unlock(&reg_mutex);

	kfree(refs);

	return 0;
}

static const struct file_operations user_data_fops = {
	.write = user_events_write,
	.write_iter = user_events_write_iter,
	.unlocked_ioctl = user_events_ioctl,
	.release = user_events_release,
};
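/*
 * A hypothetical user-space fast path against the status page mapped by
 * user_status_mmap() below. The mapping length must equal MAX_EVENTS
 * (a single page here) and each registered event owns one byte; a
 * non-zero byte means at least one tracer is attached, so writes are
 * worth making:
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	char *status_page = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *				 status_fd, 0);
 *
 *	if (status_page[reg.status_index])
 *		write(data_fd, &payload, sizeof(payload));
 */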
/*
 * Maps the shared page into the user process for checking if an event
 * is enabled.
 */
static int user_status_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size != MAX_EVENTS)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(register_page_data) >> PAGE_SHIFT,
			       size, vm_get_page_prot(VM_READ));
}

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0, flags;

	mutex_lock(&reg_mutex);

	hash_for_each(register_table, i, user, node) {
		status = register_page_data[user->index];
		flags = user->flags;

		seq_printf(m, "%d:%s", user->index, EVENT_NAME(user));

		if (flags != 0 || status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		if (flags & FLAG_BPF_ITER)
			seq_puts(m, " FLAG:BPF_ITER");

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);
	seq_printf(m, "Max: %ld\n", MAX_EVENTS);

	return 0;
}

static const struct seq_operations user_seq_ops = {
	.start = user_seq_start,
	.next = user_seq_next,
	.stop = user_seq_stop,
	.show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	return seq_open(file, &user_seq_ops);
}

static const struct file_operations user_status_fops = {
	.open = user_status_open,
	.mmap = user_status_mmap,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
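/*
 * Reading user_events_status with a single registered event being
 * consumed by ftrace shows output like the following (assuming 4K
 * pages; index 1 is the first free slot since bit 0 is reserved):
 *
 *	1:test # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 *	Max: 4096
 */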
/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	/* mmap with MAP_SHARED requires writable fd */
	emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static void set_page_reservations(bool set)
{
	int page;

	for (page = 0; page < MAX_PAGES; ++page) {
		void *addr = register_page_data + (PAGE_SIZE * page);

		if (set)
			SetPageReserved(virt_to_page(addr));
		else
			ClearPageReserved(virt_to_page(addr));
	}
}

static int __init trace_events_user_init(void)
{
	int ret;

	/* Zero all bits besides 0 (which is reserved for failures) */
	bitmap_zero(page_bitmap, MAX_EVENTS);
	set_bit(0, page_bitmap);

	register_page_data = kzalloc(MAX_EVENTS, GFP_KERNEL);

	if (!register_page_data)
		return -ENOMEM;

	set_page_reservations(true);

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		set_page_reservations(false);
		kfree(register_page_data);
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	return 0;
}

fs_initcall(trace_events_user_init);