// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/* Reminder to move to uapi when everything works */
#ifdef CONFIG_COMPILE_TEST
#include <linux/user_events.h>
#else
#include <uapi/linux/user_events.h>
#endif
#include "trace.h"
#include "trace_dynevent.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/*
 * Limits how many trace_event calls user processes can create:
 * Must be a power-of-two number of pages.
 */
#define MAX_PAGE_ORDER 0
#define MAX_PAGES (1 << MAX_PAGE_ORDER)
#define MAX_BYTES (MAX_PAGES * PAGE_SIZE)
#define MAX_EVENTS (MAX_BYTES * 8)

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * The MAP_STATUS_* macros are used for taking an index and determining the
 * appropriate byte and the bit in the byte to set/reset for an event.
 *
 * The lower 3 bits of the index decide which bit to set.
 * The remaining upper bits of the index decide which byte to use for the bit.
 *
 * This is used when an event has a probe attached/removed to reflect the
 * live status of the event (traced or not) to user programs via shared
 * memory maps.
 */
#define MAP_STATUS_BYTE(index) ((index) >> 3)
#define MAP_STATUS_MASK(index) BIT((index) & 7)
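/*
 * Worked example of the mapping above: an event with index 10 lives in
 * byte MAP_STATUS_BYTE(10) == 1 of the status page, at bit
 * MAP_STATUS_MASK(10) == BIT(2), since 10 >> 3 == 1 and 10 & 7 == 2.
 */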
/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the pages, tables, and locks for a group of events.
 * Each logical grouping of events has its own group, with a
 * matching page for status checks within user programs. This
 * allows for isolation of events to user programs by various
 * means.
 */
struct user_event_group {
	struct page *pages;
	char *register_page_data;
	char *system_name;
	struct hlist_node node;
	struct mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
	DECLARE_BITMAP(page_bitmap, MAX_EVENTS);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/*
 * Stores per-event properties. As users register events
 * within a file, a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * refcnt reaches one.
 */
struct user_event {
	struct user_event_group *group;
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	refcount_t refcnt;
	int index;
	int flags;
	int min_size;
	char status;
};

/*
 * Stores per-file event references. As users register events
 * within a file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

struct user_event_file_info {
	struct user_event_group *group;
	struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head link;
	int offset;
	int flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static void set_page_reservations(char *pages, bool set)
{
	int page;

	for (page = 0; page < MAX_PAGES; ++page) {
		void *addr = pages + (PAGE_SIZE * page);

		if (set)
			SetPageReserved(virt_to_page(addr));
		else
			ClearPageReserved(virt_to_page(addr));
	}
}

static void user_event_group_destroy(struct user_event_group *group)
{
	if (group->register_page_data)
		set_page_reservations(group->register_page_data, false);

	if (group->pages)
		__free_pages(group->pages, MAX_PAGE_ORDER);

	kfree(group->system_name);
	kfree(group);
}

static char *user_event_group_system_name(struct user_namespace *user_ns)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	if (user_ns != &init_user_ns) {
		/*
		 * Unexpected at this point:
		 * We only currently support init_user_ns.
		 * When we enable more, this will trigger a failure so log.
		 */
		pr_warn("user_events: Namespace other than init_user_ns!\n");
		return NULL;
	}

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static inline struct user_event_group
*user_event_group_from_user_ns(struct user_namespace *user_ns)
{
	if (user_ns == &init_user_ns)
		return init_group;

	return NULL;
}

static struct user_event_group *current_user_event_group(void)
{
	struct user_namespace *user_ns = current_user_ns();
	struct user_event_group *group = NULL;

	while (user_ns) {
		group = user_event_group_from_user_ns(user_ns);

		if (group)
			break;

		user_ns = user_ns->parent;
	}

	return group;
}

static struct user_event_group
*user_event_group_create(struct user_namespace *user_ns)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name(user_ns);

	if (!group->system_name)
		goto error;

	group->pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, MAX_PAGE_ORDER);

	if (!group->pages)
		goto error;

	group->register_page_data = page_address(group->pages);

	set_page_reservations(group->register_page_data, true);

	/* Zero all bits beside 0 (which is reserved for failures) */
	bitmap_zero(group->page_bitmap, MAX_EVENTS);
	set_bit(0, group->page_bitmap);

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
}

static __always_inline
void user_event_register_set(struct user_event *user)
{
	int i = user->index;

	user->group->register_page_data[MAP_STATUS_BYTE(i)] |= MAP_STATUS_MASK(i);
}

static __always_inline
void user_event_register_clear(struct user_event *user)
{
	int i = user->index;

	user->group->register_page_data[MAP_STATUS_BYTE(i)] &= ~MAP_STATUS_MASK(i);
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	return refcount_read(&user->refcnt) == 1;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field followed by an
 * unsigned int 'id' field:
 *   test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
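/*
 * As a further (hypothetical) illustration, a command using a flag and a
 * dynamic string field might look like:
 *   test2:FLAG __data_loc char[] msg
 * where everything before the first space is the name (plus optional
 * colon-separated flags) and the remainder is the semicolon-separated
 * field list.
 */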
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser);
}

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}
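/*
 * For example, "u32" resolves to 4 bytes, "char[20]" resolves to 20 bytes
 * via user_field_array_size(), and "__data_loc char[]" resolves to the
 * 4-byte loc word that is later bounds-checked in user_event_validate().
 */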
static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, link) {
		list_del(&validator->link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required, this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}
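/*
 * For example, "struct mystruct field1 8" parses as type "struct mystruct",
 * name "field1" and an explicit size of 8, while "u32 field2" stops at
 * FIELD_DEPTH_NAME and takes its size from user_field_size().
 */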
static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown (likely a struct), allowed; treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}
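/*
 * For the example event "test char[20] msg;unsigned int id" described
 * earlier, the generated print_fmt would be:
 *   "msg=%s id=%u", REC->msg, REC->id
 */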
static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	/* Unsafe to try to decode user provided print_fmt, use hex */
	trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
				 1, iter->ent, iter->ent_size, true);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);

	user_event_register_clear(user);
	clear_bit(user->index, user->group->page_bitmap);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name)) {
			refcount_inc(&user->refcnt);
			return user;
		}

	return NULL;
}
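/*
 * Dynamic array fields (__data_loc/__rel_loc) store a u32 "loc" word in
 * the payload: the lower 16 bits hold the data offset and the upper 16
 * bits the size. For example, loc == 0x00140008 describes 0x14 (20)
 * bytes at offset 8. The validator below bounds-checks that region.
 */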
static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif

/*
 * Update the register page that is shared between user processes.
 */
static void update_reg_page_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	if (status)
		user_event_register_set(user);
	else
		user_event_register_clear(user);

	user->status = status;
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	refcount_inc(&user->refcnt);
	update_reg_page_for(user);
	return 0;
dec:
	update_reg_page_for(user);
	refcount_dec(&user->refcnt);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event_group *group;
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	group = current_user_event_group();

	if (!group) {
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&group->reg_mutex);

	ret = user_event_parse_cmd(group, name, &user);

	if (!ret)
		refcount_dec(&user->refcnt);

	mutex_unlock(&group->reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}
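/*
 * As a sketch (assuming the usual tracefs mount point), the above allows
 * creating an event through the dynamic_events interface, e.g.:
 *   echo 'u:test char[20] msg' >> /sys/kernel/tracing/dynamic_events
 * where "u:" is USER_EVENTS_PREFIX.
 */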
static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name = NULL, *dyn_field_name = NULL;
	bool colon = false, match = false;
	int dyn_len, len;

	if (*iout >= argc)
		return false;

	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
					    0, &colon);

	len = user_field_set_string(field, field_name, 0, colon);

	if (dyn_len != len)
		return false;

	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
	field_name = kmalloc(len, GFP_KERNEL);

	if (!dyn_field_name || !field_name)
		goto out;

	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
				  dyn_len, &colon);

	user_field_set_string(field, field_name, len, colon);

	match = strcmp(dyn_field_name, field_name) == 0;
out:
	kfree(dyn_field_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}

static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);

	return match;
}

static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = user_event_set_call_visible(user, true);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}

/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser)
{
	int ret;
	int index;
	u32 key;
	struct user_event *user;

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(group, name, &key);
	mutex_unlock(&event_mutex);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by caller, free it since it already exists.
		 * Caller only worries about failure cases for freeing.
1286 */ 1287 kfree(name); 1288 return 0; 1289 } 1290 1291 index = find_first_zero_bit(group->page_bitmap, MAX_EVENTS); 1292 1293 if (index == MAX_EVENTS) 1294 return -EMFILE; 1295 1296 user = kzalloc(sizeof(*user), GFP_KERNEL); 1297 1298 if (!user) 1299 return -ENOMEM; 1300 1301 INIT_LIST_HEAD(&user->class.fields); 1302 INIT_LIST_HEAD(&user->fields); 1303 INIT_LIST_HEAD(&user->validators); 1304 1305 user->group = group; 1306 user->tracepoint.name = name; 1307 1308 ret = user_event_parse_fields(user, args); 1309 1310 if (ret) 1311 goto put_user; 1312 1313 ret = user_event_create_print_fmt(user); 1314 1315 if (ret) 1316 goto put_user; 1317 1318 user->call.data = user; 1319 user->call.class = &user->class; 1320 user->call.name = name; 1321 user->call.flags = TRACE_EVENT_FL_TRACEPOINT; 1322 user->call.tp = &user->tracepoint; 1323 user->call.event.funcs = &user_event_funcs; 1324 user->class.system = group->system_name; 1325 1326 user->class.fields_array = user_event_fields_array; 1327 user->class.get_fields = user_event_get_fields; 1328 user->class.reg = user_event_reg; 1329 user->class.probe = user_event_ftrace; 1330 #ifdef CONFIG_PERF_EVENTS 1331 user->class.perf_probe = user_event_perf; 1332 #endif 1333 1334 mutex_lock(&event_mutex); 1335 1336 ret = user_event_trace_register(user); 1337 1338 if (ret) 1339 goto put_user_lock; 1340 1341 user->index = index; 1342 1343 /* Ensure we track self ref and caller ref (2) */ 1344 refcount_set(&user->refcnt, 2); 1345 1346 dyn_event_init(&user->devent, &user_event_dops); 1347 dyn_event_add(&user->devent, &user->call); 1348 set_bit(user->index, group->page_bitmap); 1349 hash_add(group->register_table, &user->node, key); 1350 1351 mutex_unlock(&event_mutex); 1352 1353 *newuser = user; 1354 return 0; 1355 put_user_lock: 1356 mutex_unlock(&event_mutex); 1357 put_user: 1358 user_event_destroy_fields(user); 1359 user_event_destroy_validators(user); 1360 kfree(user); 1361 return ret; 1362 } 1363 1364 /* 1365 * Deletes a previously created event if it is no longer being used. 1366 */ 1367 static int delete_user_event(struct user_event_group *group, char *name) 1368 { 1369 u32 key; 1370 struct user_event *user = find_user_event(group, name, &key); 1371 1372 if (!user) 1373 return -ENOENT; 1374 1375 refcount_dec(&user->refcnt); 1376 1377 if (!user_event_last_ref(user)) 1378 return -EBUSY; 1379 1380 return destroy_user_event(user); 1381 } 1382 1383 /* 1384 * Validates the user payload and writes via iterator. 1385 */ 1386 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i) 1387 { 1388 struct user_event_file_info *info = file->private_data; 1389 struct user_event_refs *refs; 1390 struct user_event *user = NULL; 1391 struct tracepoint *tp; 1392 ssize_t ret = i->count; 1393 int idx; 1394 1395 if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx))) 1396 return -EFAULT; 1397 1398 rcu_read_lock_sched(); 1399 1400 refs = rcu_dereference_sched(info->refs); 1401 1402 /* 1403 * The refs->events array is protected by RCU, and new items may be 1404 * added. But the user retrieved from indexing into the events array 1405 * shall be immutable while the file is opened. 
/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(info->refs);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled disables after this check, however
	 * we don't mind if a few events are included in this condition.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	}

	return ret;
}

static int user_events_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	struct user_event_file_info *info;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	info->group = group;

	file->private_data = info;

	return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(WRITE, (char __user *)ubuf,
					 count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{
	struct user_event_group *group = info->group;
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(info->refs,
					 lockdep_is_held(&group->reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user;

	refcount_inc(&user->refcnt);

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}
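/*
 * A minimal user-space registration sketch (illustrative only, assuming
 * the uapi struct user_reg layout from <linux/user_events.h>):
 *
 *   struct user_reg reg = { 0 };
 *
 *   reg.size = sizeof(reg);
 *   reg.name_args = (__u64)(uintptr_t)"test u32 count";
 *
 *   if (ioctl(data_fd, DIAG_IOCSREG, &reg) == 0) {
 *           // reg.write_index prefixes writes, reg.status_bit indexes
 *           // the mmap'ed status page.
 *   }
 */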
static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_reg, write_index))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	if (ret)
		return ret;

	kreg->size = size;

	return 0;
}

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
				  unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	char *name;
	long ret;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(info->group, name, &user);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(info, user);

	/* No longer need parse ref, ref_add either worked or not */
	refcount_dec(&user->refcnt);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	put_user((u32)ret, &ureg->write_index);
	put_user(user->index, &ureg->status_bit);

	return 0;
}

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
				  unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(info->group, name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group = info->group;
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_reg(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_del(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;
	}

	return ret;
}

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group;
	struct user_event_refs *refs;
	struct user_event *user;
	int i;

	if (!info)
		return -EINVAL;

	group = info->group;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&group->reg_mutex);

	refs = info->refs;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
1699 */ 1700 for (i = 0; i < refs->count; ++i) { 1701 user = refs->events[i]; 1702 1703 if (user) 1704 refcount_dec(&user->refcnt); 1705 } 1706 out: 1707 file->private_data = NULL; 1708 1709 mutex_unlock(&group->reg_mutex); 1710 1711 kfree(refs); 1712 kfree(info); 1713 1714 return 0; 1715 } 1716 1717 static const struct file_operations user_data_fops = { 1718 .open = user_events_open, 1719 .write = user_events_write, 1720 .write_iter = user_events_write_iter, 1721 .unlocked_ioctl = user_events_ioctl, 1722 .release = user_events_release, 1723 }; 1724 1725 static struct user_event_group *user_status_group(struct file *file) 1726 { 1727 struct seq_file *m = file->private_data; 1728 1729 if (!m) 1730 return NULL; 1731 1732 return m->private; 1733 } 1734 1735 /* 1736 * Maps the shared page into the user process for checking if event is enabled. 1737 */ 1738 static int user_status_mmap(struct file *file, struct vm_area_struct *vma) 1739 { 1740 char *pages; 1741 struct user_event_group *group = user_status_group(file); 1742 unsigned long size = vma->vm_end - vma->vm_start; 1743 1744 if (size != MAX_BYTES) 1745 return -EINVAL; 1746 1747 if (!group) 1748 return -EINVAL; 1749 1750 pages = group->register_page_data; 1751 1752 return remap_pfn_range(vma, vma->vm_start, 1753 virt_to_phys(pages) >> PAGE_SHIFT, 1754 size, vm_get_page_prot(VM_READ)); 1755 } 1756 1757 static void *user_seq_start(struct seq_file *m, loff_t *pos) 1758 { 1759 if (*pos) 1760 return NULL; 1761 1762 return (void *)1; 1763 } 1764 1765 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos) 1766 { 1767 ++*pos; 1768 return NULL; 1769 } 1770 1771 static void user_seq_stop(struct seq_file *m, void *p) 1772 { 1773 } 1774 1775 static int user_seq_show(struct seq_file *m, void *p) 1776 { 1777 struct user_event_group *group = m->private; 1778 struct user_event *user; 1779 char status; 1780 int i, active = 0, busy = 0, flags; 1781 1782 if (!group) 1783 return -EINVAL; 1784 1785 mutex_lock(&group->reg_mutex); 1786 1787 hash_for_each(group->register_table, i, user, node) { 1788 status = user->status; 1789 flags = user->flags; 1790 1791 seq_printf(m, "%d:%s", user->index, EVENT_NAME(user)); 1792 1793 if (flags != 0 || status != 0) 1794 seq_puts(m, " #"); 1795 1796 if (status != 0) { 1797 seq_puts(m, " Used by"); 1798 if (status & EVENT_STATUS_FTRACE) 1799 seq_puts(m, " ftrace"); 1800 if (status & EVENT_STATUS_PERF) 1801 seq_puts(m, " perf"); 1802 if (status & EVENT_STATUS_OTHER) 1803 seq_puts(m, " other"); 1804 busy++; 1805 } 1806 1807 seq_puts(m, "\n"); 1808 active++; 1809 } 1810 1811 mutex_unlock(&group->reg_mutex); 1812 1813 seq_puts(m, "\n"); 1814 seq_printf(m, "Active: %d\n", active); 1815 seq_printf(m, "Busy: %d\n", busy); 1816 seq_printf(m, "Max: %ld\n", MAX_EVENTS); 1817 1818 return 0; 1819 } 1820 1821 static const struct seq_operations user_seq_ops = { 1822 .start = user_seq_start, 1823 .next = user_seq_next, 1824 .stop = user_seq_stop, 1825 .show = user_seq_show, 1826 }; 1827 1828 static int user_status_open(struct inode *node, struct file *file) 1829 { 1830 struct user_event_group *group; 1831 int ret; 1832 1833 group = current_user_event_group(); 1834 1835 if (!group) 1836 return -ENOENT; 1837 1838 ret = seq_open(file, &user_seq_ops); 1839 1840 if (!ret) { 1841 /* Chain group to seq_file */ 1842 struct seq_file *m = file->private_data; 1843 1844 m->private = group; 1845 } 1846 1847 return ret; 1848 } 1849 1850 static const struct file_operations user_status_fops = { 1851 .open = user_status_open, 1852 .mmap = 
static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event_group *group = m->private;
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0, flags;

	if (!group)
		return -EINVAL;

	mutex_lock(&group->reg_mutex);

	hash_for_each(group->register_table, i, user, node) {
		status = user->status;
		flags = user->flags;

		seq_printf(m, "%d:%s", user->index, EVENT_NAME(user));

		if (flags != 0 || status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&group->reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);
	seq_printf(m, "Max: %ld\n", MAX_EVENTS);

	return 0;
}

static const struct seq_operations user_seq_ops = {
	.start = user_seq_start,
	.next = user_seq_next,
	.stop = user_seq_stop,
	.show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	int ret;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	ret = seq_open(file, &user_seq_ops);

	if (!ret) {
		/* Chain group to seq_file */
		struct seq_file *m = file->private_data;

		m->private = group;
	}

	return ret;
}

static const struct file_operations user_status_fops = {
	.open = user_status_open,
	.mmap = user_status_mmap,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	/* mmap with MAP_SHARED requires writable fd */
	emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static int __init trace_events_user_init(void)
{
	int ret;

	init_group = user_event_group_create(&init_user_ns);

	if (!init_group)
		return -ENOMEM;

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		user_event_group_destroy(init_group);
		init_group = NULL;
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	return 0;
}

fs_initcall(trace_events_user_init);