// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long of an event name plus args within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * User register flags are not allowed yet, keep them here until we are
 * ready to expose them out to the user ABI.
 */
enum user_reg_flag {
	/* Event will not delete upon last reference closing */
	USER_EVENT_REG_PERSIST = 1U << 0,

	/* This value or above is currently non-ABI */
	USER_EVENT_REG_MAX = 1U << 1,
};

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
	char *system_name;
	struct hlist_node node;
	struct mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties, as users register events
 * within a file a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * refcnt reaches one.
 */
struct user_event {
	struct user_event_group *group;
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	struct work_struct put_work;
	refcount_t refcnt;
	int min_size;
	int reg_flags;
	char status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
	struct list_head mm_enablers_link;
	struct user_event *event;
	unsigned long addr;

	/* Track enable bit, flags, etc. Aligned for bitops. */
	unsigned long values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Only duplicate the bit value */
#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))

/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
	struct work_struct work;
	struct user_event_mm *mm;
	struct user_event_enabler *enabler;
	int attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);

/*
 * Stores per-file events references, as users register events
 * within a file this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};

struct user_event_file_info {
	struct user_event_group *group;
	struct user_event_refs *refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head user_event_link;
	int offset;
	int flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);
static int destroy_user_event(struct user_event *user);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static struct user_event *user_event_get(struct user_event *user)
{
	refcount_inc(&user->refcnt);

	return user;
}

static void delayed_destroy_user_event(struct work_struct *work)
{
	struct user_event *user = container_of(
		work, struct user_event, put_work);

	mutex_lock(&event_mutex);

	if (!refcount_dec_and_test(&user->refcnt))
		goto out;

	if (destroy_user_event(user)) {
		/*
		 * The only reason this would fail here is if we cannot
		 * update the visibility of the event. In this case the
		 * event stays in the hashtable, waiting for someone to
		 * attempt to delete it later.
		 */
		pr_warn("user_events: Unable to delete event\n");
		refcount_set(&user->refcnt, 1);
	}
out:
	mutex_unlock(&event_mutex);
}

static void user_event_put(struct user_event *user, bool locked)
{
	bool delete;

	if (unlikely(!user))
		return;

	/*
	 * When the event is not enabled for auto-delete there will always
	 * be at least 1 reference to the event. During the event creation
	 * we initially set the refcnt to 2 to achieve this. In those cases
	 * the caller must acquire event_mutex and after decrement check if
	 * the refcnt is 1, meaning this is the last reference. When auto
	 * delete is enabled, there will only be 1 ref, IE: refcnt will be
	 * only set to 1 during creation to allow the below checks to go
	 * through upon the last put. The last put must always be done with
	 * the event mutex held.
	 */
	if (!locked) {
		lockdep_assert_not_held(&event_mutex);
		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
	} else {
		lockdep_assert_held(&event_mutex);
		delete = refcount_dec_and_test(&user->refcnt);
	}

	if (!delete)
		return;

	/*
	 * We now have the event_mutex in all cases, which ensures that
	 * no new references will be taken until event_mutex is released.
	 * New references come through find_user_event(), which requires
	 * the event_mutex to be held.
	 */

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* We should not get here when persist flag is set */
		pr_alert("BUG: Auto-delete engaged on persistent event\n");
		goto out;
	}

	/*
	 * Unfortunately we have to attempt the actual destroy in a work
	 * queue. This is because not all cases handle a trace_event_call
	 * being removed within the class->reg() operation for unregister.
	 */
	INIT_WORK(&user->put_work, delayed_destroy_user_event);

	/*
	 * Since the event is still in the hashtable, we have to re-inc
	 * the ref count to 1. This count will be decremented and checked
	 * in the work queue to ensure it's still the last ref. This is
	 * needed because a user-process could register the same event in
	 * between the time of event_mutex release and the work queue
	 * running the delayed destroy. If we removed the item now from
	 * the hashtable, this would result in a timing window where a
	 * user process would fail a register because the trace_event_call
	 * register would fail in the tracing layers.
	 */
	refcount_set(&user->refcnt, 1);

	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
		/*
		 * If we fail we must wait for an admin to attempt delete or
		 * another register/close of the event, whichever is first.
		 */
		pr_warn("user_events: Unable to queue delayed destroy\n");
	}
out:
	/* Ensure we unlock event_mutex if we didn't hold it before */
	if (!locked)
		mutex_unlock(&event_mutex);
}

static void user_event_group_destroy(struct user_event_group *group)
{
	kfree(group->system_name);
	kfree(group);
}

static char *user_event_group_system_name(void)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static struct user_event_group *current_user_event_group(void)
{
	return init_group;
}

static struct user_event_group *user_event_group_create(void)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name();

	if (!group->system_name)
		goto error;

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
};

static void user_event_enabler_destroy(struct user_event_enabler *enabler,
				       bool locked)
{
	list_del_rcu(&enabler->mm_enablers_link);

	/* No longer tracking the event via the enabler */
	user_event_put(enabler->event, locked);

	kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
				  int attempt)
{
	bool unlocked;
	int ret;

	/*
	 * Normally this is low, ensure that it cannot be taken advantage of by
	 * bad user processes to cause excessive looping.
	 */
	if (attempt > 10)
		return -EFAULT;

	mmap_read_lock(mm->mm);

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
			       &unlocked);
out:
	mmap_read_unlock(mm->mm);

	return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt);

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
	struct user_event_enabler_fault *fault = container_of(
		work, struct user_event_enabler_fault, work);
	struct user_event_enabler *enabler = fault->enabler;
	struct user_event_mm *mm = fault->mm;
	unsigned long uaddr = enabler->addr;
	int attempt = fault->attempt;
	int ret;

	ret = user_event_mm_fault_in(mm, uaddr, attempt);

	if (ret && ret != -ENOENT) {
		struct user_event *user = enabler->event;

		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
	}

	/* Prevent state changes from racing */
	mutex_lock(&event_mutex);

	/* User asked for enabler to be removed during fault */
	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
		user_event_enabler_destroy(enabler, true);
		goto out;
	}

	/*
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
	 */
	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!ret) {
		mmap_read_lock(mm->mm);
		user_event_enabler_write(mm, enabler, true, &attempt);
		mmap_read_unlock(mm->mm);
	}
out:
	mutex_unlock(&event_mutex);

	/* In all cases we no longer need the mm or fault */
	user_event_mm_put(mm);
	kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
					   struct user_event_enabler *enabler,
					   int attempt)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

	if (!fault)
		return false;

	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
	fault->mm = user_event_mm_get(mm);
	fault->enabler = enabler;
	fault->attempt = attempt;

	/* Don't try to queue in again while we have a pending fault */
	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!schedule_work(&fault->work)) {
		/* Allow another attempt later */
		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

		user_event_mm_put(mm);
		kmem_cache_free(fault_cache, fault);

		return false;
	}

	return true;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt)
{
	unsigned long uaddr = enabler->addr;
	unsigned long *ptr;
	struct page *page;
	void *kaddr;
	int ret;

	lockdep_assert_held(&event_mutex);
	mmap_assert_locked(mm->mm);

	*attempt += 1;

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0)
		return -ENOENT;

	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
		return -EBUSY;

	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
				    &page, NULL);

	if (unlikely(ret <= 0)) {
		if (!fixup_fault)
			return -EFAULT;

		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
			pr_warn("user_events: Unable to queue fault handler\n");

		return -EFAULT;
	}

	kaddr = kmap_local_page(page);
	ptr = kaddr + (uaddr & ~PAGE_MASK);

	/* Update bit atomically, user tracers must be atomic as well */
	if (enabler->event && enabler->event->status)
		set_bit(ENABLE_BIT(enabler), ptr);
	else
		clear_bit(ENABLE_BIT(enabler), ptr);

	kunmap_local(kaddr);
	unpin_user_pages_dirty_lock(&page, 1, true);

	return 0;
}

static bool user_event_enabler_exists(struct user_event_mm *mm,
				      unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler *enabler;

	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
			return true;
	}

	return false;
}

static void user_event_enabler_update(struct user_event *user)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *next;
	struct user_event_mm *mm;
	int attempt;

	lockdep_assert_held(&event_mutex);

	/*
	 * We need to build a one-shot list of all the mms that have an
	 * enabler for the user_event passed in. This list is only valid
	 * while holding the event_mutex. The only reason for this is due
	 * to the global mm list being RCU protected and we use methods
	 * which can wait (mmap_read_lock and pin_user_pages_remote).
	 *
	 * NOTE: user_event_mm_get_all() increments the ref count of each
	 * mm that is added to the list to prevent removal timing windows.
	 * We must always put each mm after they are used, which may wait.
	 */
	mm = user_event_mm_get_all(user);

	while (mm) {
		next = mm->next;
		mmap_read_lock(mm->mm);

		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				attempt = 0;
				user_event_enabler_write(mm, enabler, true, &attempt);
			}
		}

		mmap_read_unlock(mm->mm);
		user_event_mm_put(mm);
		mm = next;
	}
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
				   struct user_event_mm *mm)
{
	struct user_event_enabler *enabler;

	/* Skip pending frees */
	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
		return true;

	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

	if (!enabler)
		return false;

	enabler->event = user_event_get(orig->event);
	enabler->addr = orig->addr;

	/* Only dup part of value (ignore future flags, etc) */
	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

	/* Enablers not exposed yet, RCU not required */
	list_add(&enabler->mm_enablers_link, &mm->enablers);

	return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
	refcount_inc(&mm->refcnt);

	return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
	struct user_event_mm *found = NULL;
	struct user_event_enabler *enabler;
	struct user_event_mm *mm;

	/*
	 * We use the mm->next field to build a one-shot list from the global
	 * RCU protected list. To build this list the event_mutex must be held.
	 * This lets us build a list without requiring allocs that could fail
	 * when user based events are most wanted for diagnostics.
	 */
	lockdep_assert_held(&event_mutex);

	/*
	 * We do not want to block fork/exec while enablements are being
	 * updated, so we use RCU to walk the current tasks that have used
	 * user_events ABI for 1 or more events. Each enabler found in each
	 * task that matches the event being updated has a write to reflect
	 * the kernel state back into the process. Waits/faults must not occur
	 * during this. So we scan the list under RCU for all the mm that have
	 * the event within it. This is needed because mmap_read_lock() can wait.
	 * Each user mm returned has a ref inc to handle remove RCU races.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				mm->next = found;
				found = user_event_mm_get(mm);
				break;
			}
		}
	}

	rcu_read_unlock();

	return found;
}

static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
	struct user_event_mm *user_mm;

	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

	if (!user_mm)
		return NULL;

	user_mm->mm = t->mm;
	INIT_LIST_HEAD(&user_mm->enablers);
	refcount_set(&user_mm->refcnt, 1);
	refcount_set(&user_mm->tasks, 1);

	/*
	 * The lifetime of the memory descriptor can slightly outlast
	 * the task lifetime if a ref to the user_event_mm is taken
	 * between list_del_rcu() and call_rcu(). Therefore we need
	 * to take a reference to it to ensure it can live this long
	 * under this corner case. This can also occur in clones that
	 * outlast the parent.
	 */
	mmgrab(user_mm->mm);

	return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_add_rcu(&user_mm->mms_link, &user_event_mms);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
	struct user_event_mm *user_mm = current->user_event_mm;

	if (user_mm)
		goto inc;

	user_mm = user_event_mm_alloc(current);

	if (!user_mm)
		goto error;

	user_event_mm_attach(user_mm, current);
inc:
	refcount_inc(&user_mm->refcnt);
error:
	return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
	struct user_event_enabler *enabler, *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
		user_event_enabler_destroy(enabler, false);

	mmdrop(mm->mm);
	kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt))
		user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
	struct user_event_mm *mm;

	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
	user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
	struct user_event_mm *mm;
	unsigned long flags;

	might_sleep();

	mm = t->user_event_mm;
	t->user_event_mm = NULL;

	/* Clone will increment the tasks, only remove if last clone */
	if (!refcount_dec_and_test(&mm->tasks))
		return;

	/* Remove the mm from the list, so it can no longer be enabled */
	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_del_rcu(&mm->mms_link);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	/*
	 * We need to wait for currently occurring writes to stop within
	 * the mm. This is required since exit_mm() snaps the current rss
	 * stats and clears them. On the final mmdrop(), check_mm() will
	 * report a bug if these increment.
	 *
	 * All writes/pins are done under mmap_read lock, take the write
	 * lock to ensure in-progress faults have completed. Faults that
	 * are pending but yet to run will check the task count and skip
	 * the fault since the mm is going away.
	 */
	mmap_write_lock(mm->mm);
	mmap_write_unlock(mm->mm);

	/*
	 * Put for mm must be done after RCU delay to handle new refs in
	 * between the list_del_rcu() and now. This ensures any get refs
	 * during rcu_read_lock() are accounted for during list removal.
	 *
	 * CPU A			|	CPU B
	 * ---------------------------------------------------------------
	 * user_event_mm_remove()	|	rcu_read_lock();
	 * list_del_rcu()		|	list_for_each_entry_rcu();
	 * call_rcu()			|	refcount_inc();
	 * .				|	rcu_read_unlock();
	 * schedule_work()		|	.
	 * user_event_mm_put()		|	.
	 *
	 * mmdrop() cannot be called in the softirq context of call_rcu()
	 * so we use a work queue after call_rcu() to run within.
	 */
	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
	queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
	struct user_event_mm *mm = user_event_mm_alloc(t);
	struct user_event_enabler *enabler;

	if (!mm)
		return;

	rcu_read_lock();

	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
		if (!user_event_enabler_dup(enabler, mm))
			goto error;
	}

	rcu_read_unlock();

	user_event_mm_attach(mm, t);
	return;
error:
	rcu_read_unlock();
	user_event_mm_destroy(mm);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
					      unsigned char bit)
{
	struct user_event_mm *user_mm = current_user_event_mm();
	bool exists;

	if (!user_mm)
		return false;

	exists = user_event_enabler_exists(user_mm, uaddr, bit);

	user_event_mm_put(user_mm);

	return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
			   int *write_result)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *user_mm;
	unsigned long uaddr = (unsigned long)reg->enable_addr;
	int attempt = 0;

	user_mm = current_user_event_mm();

	if (!user_mm)
		return NULL;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

	if (!enabler)
		goto out;

	enabler->event = user;
	enabler->addr = uaddr;
	enabler->values = reg->enable_bit;
retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Attempt to reflect the current state within the process */
	mmap_read_lock(user_mm->mm);
	*write_result = user_event_enabler_write(user_mm, enabler, false,
						 &attempt);
	mmap_read_unlock(user_mm->mm);

	/*
	 * If the write works, then we will track the enabler. A ref to the
	 * underlying user_event is held by the enabler to prevent it going
	 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
	 * be forcefully deleted from the system until all tasks using it
	 * exit or run exec(), which includes forks and clones.
	 */
	if (!*write_result) {
		user_event_get(user);
		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
	}

	mutex_unlock(&event_mutex);

	if (*write_result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;

		kfree(enabler);
		enabler = NULL;
	}
out:
	user_event_mm_put(user_mm);

	return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	int last = 0;

	if (user->reg_flags & USER_EVENT_REG_PERSIST)
		last = 1;

	return refcount_read(&user->refcnt) == last;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser,
				int reg_flags)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser, reg_flags);
}

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, user_event_link) {
		list_del(&validator->user_event_link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->user_event_link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required, this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown, likely struct, allowed treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (str_has_prefix(field->type, "struct "))
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	if (current_user_events > 0)
		current_user_events--;
	else
		pr_alert("BUG: Bad current_user_events\n");

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name))
			return user_event_get(user);

	return NULL;
}

static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, user_event_link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif

/*
 * Update the enabled bit among all user processes.
 */
static void update_enable_bit_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	user->status = status;

	user_event_enabler_update(user);
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	user_event_get(user);
	update_enable_bit_for(user);
	return 0;
dec:
	update_enable_bit_for(user);
	user_event_put(user, true);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event_group *group;
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);

	if (!name)
		return -ENOMEM;

	group = current_user_event_group();

	if (!group) {
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&group->reg_mutex);

	/* Dyn events persist, otherwise they would cleanup immediately */
	ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);

	if (!ret)
		user_event_put(user, false);

	mutex_unlock(&group->reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}

static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name = NULL, *dyn_field_name = NULL;
	bool colon = false, match = false;
	int dyn_len, len;

	if (*iout >= argc)
		return false;

	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
					    0, &colon);

	len = user_field_set_string(field, field_name, 0, colon);

	if (dyn_len != len)
		return false;

	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
	field_name = kmalloc(len, GFP_KERNEL);

	if (!dyn_field_name || !field_name)
		goto out;

	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
				  dyn_len, &colon);

	user_field_set_string(field, field_name, len, colon);

	match = strcmp(dyn_field_name, field_name) == 0;
out:
	kfree(dyn_field_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}

static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);
	else if (match && argc == 0)
		match = list_empty(&user->fields);

	return match;
}

static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = user_event_set_call_visible(user, true);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}

/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags)
{
	int ret;
	u32 key;
	struct user_event *user;
	int argc = 0;
	char **argv;

	/* User register flags are not ready yet */
	if (reg_flags != 0 || flags != NULL)
		return -EINVAL;

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(group, name, &key);
	mutex_unlock(&event_mutex);

	if (user) {
		if (args) {
			argv = argv_split(GFP_KERNEL, args, &argc);
			if (!argv) {
				ret = -ENOMEM;
				goto error;
			}

			ret = user_fields_match(user, argc, (const char **)argv);
			argv_free(argv);

		} else
			ret = list_empty(&user->fields);

		if (ret) {
			*newuser = user;
			/*
			 * Name is allocated by caller, free it since it already exists.
			 * Caller only worries about failure cases for freeing.
			 */
			kfree(name);
		} else {
			ret = -EADDRINUSE;
			goto error;
		}

		return 0;
error:
		user_event_put(user, false);
		return ret;
	}

	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);
	INIT_LIST_HEAD(&user->validators);

	user->group = group;
	user->tracepoint.name = name;

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;
	user->class.system = group->system_name;

	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
	user->class.perf_probe = user_event_perf;
#endif

	mutex_lock(&event_mutex);

	if (current_user_events >= max_user_events) {
		ret = -EMFILE;
		goto put_user_lock;
	}

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	user->reg_flags = reg_flags;

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* Ensure we track self ref and caller ref (2) */
		refcount_set(&user->refcnt, 2);
	} else {
		/* Ensure we track only caller ref (1) */
		refcount_set(&user->refcnt, 1);
	}

	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	hash_add(group->register_table, &user->node, key);
	current_user_events++;

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;
put_user_lock:
	mutex_unlock(&event_mutex);
put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(user);
	return ret;
}

/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
	u32 key;
	struct user_event *user = find_user_event(group, name, &key);

	if (!user)
		return -ENOENT;

	user_event_put(user, true);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	if (idx < 0)
		return -EINVAL;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(info->refs);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled disables after this check, however
	 * we don't mind if a few events are included in this condition.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	} else
		return -EBADF;

	return ret;
}

static int user_events_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	struct user_event_file_info *info;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);

	if (!info)
		return -ENOMEM;

	info->group = group;

	file->private_data = info;

	return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
					 count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{
	struct user_event_group *group = info->group;
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(info->refs,
					 lockdep_is_held(&group->reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user_event_get(user);

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_reg, write_index))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	if (ret)
		return ret;

	/* Ensure only valid flags */
	if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
		return -EINVAL;

	/* Ensure supported size */
	switch (kreg->enable_size) {
	case 4:
		/* 32-bit */
		break;
#if BITS_PER_LONG >= 64
	case 8:
		/* 64-bit */
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Ensure natural alignment */
	if (kreg->enable_addr % kreg->enable_size)
		return -EINVAL;

	/* Ensure bit range for size */
	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
		return -EINVAL;

	/* Ensure accessible */
	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
		       kreg->enable_size))
		return -EFAULT;

	kreg->size = size;

	return 0;
}
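
/*
 * For illustration: a hedged sketch of how user space might fill
 * struct user_reg so that it passes the checks in user_reg_get() above.
 * The field names are taken from the code in this file; the enable
 * variable, file descriptor and event description are hypothetical:
 *
 *	static int enabled;			// 32-bit enable word
 *	struct user_reg reg = {};
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;			// must fit enable_size * 8 bits
 *	reg.enable_size = sizeof(enabled);	// only 4 (or 8) is accepted
 *	reg.enable_addr = (__u64)(uintptr_t)&enabled;	// naturally aligned
 *	reg.name_args = (__u64)(uintptr_t)"test u32 count";
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);	// on success write_index is set
 */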

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
				  unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	struct user_event_enabler *enabler;
	char *name;
	long ret;
	int write_result;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	/*
	 * Prevent users from using the same address and bit multiple times
	 * within the same mm address space. This can cause unexpected behavior
	 * for user processes that is far easier to debug if this is explicitly
	 * an error upon registering.
	 */
	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
					      reg.enable_bit))
		return -EADDRINUSE;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(info->group, name, &user, reg.flags);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(info, user);

	/* No longer need parse ref, ref_add either worked or not */
	user_event_put(user, false);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	/*
	 * user_events_ref_add succeeded:
	 * At this point we have a user_event, its lifetime is bound by the
	 * reference count, not this file. If anything fails, the user_event
	 * still has a reference until the file is released. During release
	 * any remaining references (from user_events_ref_add) are decremented.
	 *
	 * Attempt to create an enabler, whose lifetime is tied to the event
	 * in the same way. Once the task that caused the enabler to be
	 * created exits or issues exec() then the enablers it has created
	 * will be destroyed and the ref to the event will be decremented.
	 */
	enabler = user_event_enabler_create(&reg, user, &write_result);

	if (!enabler)
		return -ENOMEM;

	/* Write failed/faulted, give error back to caller */
	if (write_result)
		return write_result;

	put_user((u32)ret, &ureg->write_index);

	return 0;
}

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
				  unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(info->group, name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}

static long user_unreg_get(struct user_unreg __user *ureg,
			   struct user_unreg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_unreg, disable_addr))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	/* Ensure no reserved values, since we don't support any yet */
	if (kreg->__reserved || kreg->__reserved2)
		return -EINVAL;

	return ret;
}
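
/*
 * For illustration: a hedged sketch of struct user_unreg as user space
 * might fill it for DIAG_IOCSUNREG, matching the checks in user_unreg_get()
 * above. The address and bit must be the same ones given at registration;
 * the variable names are hypothetical:
 *
 *	struct user_unreg unreg = {};
 *
 *	unreg.size = sizeof(unreg);
 *	unreg.disable_bit = 31;				// bit used at DIAG_IOCSREG
 *	unreg.disable_addr = (__u64)(uintptr_t)&enabled;
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */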

static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
				   unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler enabler;
	int result;
	int attempt = 0;

	memset(&enabler, 0, sizeof(enabler));
	enabler.addr = uaddr;
	enabler.values = bit;
retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Force the bit to be cleared, since no event is attached */
	mmap_read_lock(user_mm->mm);
	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
	mmap_read_unlock(user_mm->mm);

	mutex_unlock(&event_mutex);

	if (result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;
	}

	return result;
}

/*
 * Unregisters an enablement address/bit within a task/user mm.
 */
static long user_events_ioctl_unreg(unsigned long uarg)
{
	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
	struct user_event_mm *mm = current->user_event_mm;
	struct user_event_enabler *enabler, *next;
	struct user_unreg reg;
	long ret;

	ret = user_unreg_get(ureg, &reg);

	if (ret)
		return ret;

	if (!mm)
		return -ENOENT;

	ret = -ENOENT;

	/*
	 * Flags freeing and faulting are used to indicate if the enabler is in
	 * use at all. When faulting is set a page-fault is occurring
	 * asynchronously. During the async fault, if freeing is set, the
	 * enabler will be destroyed. If no async fault is happening, we can
	 * destroy it now since we hold the event_mutex during these checks.
	 */
	mutex_lock(&event_mutex);

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == reg.disable_addr &&
		    ENABLE_BIT(enabler) == reg.disable_bit) {
			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));

			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
				user_event_enabler_destroy(enabler, true);

			/* Removed at least one */
			ret = 0;
		}
	}

	mutex_unlock(&event_mutex);

	/* Ensure bit is now cleared for user, regardless of event status */
	if (!ret)
		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
					      reg.disable_bit);

	return ret;
}

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group = info->group;
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_reg(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_del(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSUNREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_unreg(uarg);
		mutex_unlock(&group->reg_mutex);
		break;
	}

	return ret;
}
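
/*
 * For illustration: unlike the register/unregister commands above,
 * DIAG_IOCSDEL takes a plain pointer to the event name rather than a
 * struct, as user_events_ioctl_del() shows. A hypothetical user-space
 * call, assuming a data_fd opened on user_events_data:
 *
 *	ioctl(data_fd, DIAG_IOCSDEL, "test");
 *
 * Deletion only succeeds once no other references to the event remain.
 */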

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group;
	struct user_event_refs *refs;
	int i;

	if (!info)
		return -EINVAL;

	group = info->group;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&group->reg_mutex);

	refs = info->refs;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i)
		user_event_put(refs->events[i], false);

out:
	file->private_data = NULL;

	mutex_unlock(&group->reg_mutex);

	kfree(refs);
	kfree(info);

	return 0;
}

static const struct file_operations user_data_fops = {
	.open = user_events_open,
	.write = user_events_write,
	.write_iter = user_events_write_iter,
	.unlocked_ioctl = user_events_ioctl,
	.release = user_events_release,
};

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event_group *group = m->private;
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0;

	if (!group)
		return -EINVAL;

	mutex_lock(&group->reg_mutex);

	hash_for_each(group->register_table, i, user, node) {
		status = user->status;

		seq_printf(m, "%s", EVENT_NAME(user));

		if (status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&group->reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);

	return 0;
}

static const struct seq_operations user_seq_ops = {
	.start = user_seq_start,
	.next = user_seq_next,
	.stop = user_seq_stop,
	.show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	int ret;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	ret = seq_open(file, &user_seq_ops);

	if (!ret) {
		/* Chain group to seq_file */
		struct seq_file *m = file->private_data;

		m->private = group;
	}

	return ret;
}

static const struct file_operations user_status_fops = {
	.open = user_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
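
/*
 * For illustration: user_seq_show() above renders the status file roughly
 * as follows for a hypothetical pair of events, one of which has ftrace
 * and perf probes attached:
 *
 *	test # Used by ftrace perf
 *	other_event
 *
 *	Active: 2
 *	Busy: 1
 *
 * Each registered event is listed by name; "# Used by ..." is appended
 * only while an internal probe is attached, and such events count as busy.
 */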

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static int set_max_user_events_sysctl(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = proc_douintvec(table, write, buffer, lenp, ppos);

	mutex_unlock(&event_mutex);

	return ret;
}

static struct ctl_table user_event_sysctls[] = {
	{
		.procname = "user_events_max",
		.data = &max_user_events,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = set_max_user_events_sysctl,
	},
	{}
};

static int __init trace_events_user_init(void)
{
	int ret;

	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);

	if (!fault_cache)
		return -ENOMEM;

	init_group = user_event_group_create();

	if (!init_group) {
		kmem_cache_destroy(fault_cache);
		return -ENOMEM;
	}

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		user_event_group_destroy(init_group);
		kmem_cache_destroy(fault_cache);
		init_group = NULL;
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	register_sysctl_init("kernel", user_event_sysctls);

	return 0;
}

fs_initcall(trace_events_user_init);