1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, Microsoft Corporation.
4  *
5  * Authors:
6  *   Beau Belgrave <beaub@linux.microsoft.com>
7  */
8 
9 #include <linux/bitmap.h>
10 #include <linux/cdev.h>
11 #include <linux/hashtable.h>
12 #include <linux/list.h>
13 #include <linux/io.h>
14 #include <linux/uio.h>
15 #include <linux/ioctl.h>
16 #include <linux/jhash.h>
17 #include <linux/refcount.h>
18 #include <linux/trace_events.h>
19 #include <linux/tracefs.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/highmem.h>
23 #include <linux/init.h>
24 #include <linux/user_events.h>
25 #include "trace_dynevent.h"
26 #include "trace_output.h"
27 #include "trace.h"
28 
29 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
30 
31 #define FIELD_DEPTH_TYPE 0
32 #define FIELD_DEPTH_NAME 1
33 #define FIELD_DEPTH_SIZE 2
34 
35 /* Limit how long an event name plus args can be within the subsystem. */
36 #define MAX_EVENT_DESC 512
37 #define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
38 #define MAX_FIELD_ARRAY_SIZE 1024
39 
40 /*
41  * Internal bits (kernel side only) to keep track of connected probes:
42  * These are used when status is requested in text form about an event. These
43  * bits are compared against an internal byte on the event to determine which
44  * probes to print out to the user.
45  *
46  * These do not reflect the mapped bytes between the user and kernel space.
47  */
48 #define EVENT_STATUS_FTRACE BIT(0)
49 #define EVENT_STATUS_PERF BIT(1)
50 #define EVENT_STATUS_OTHER BIT(7)
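/*
 * For example, an event that has both an ftrace and a perf probe attached
 * ends up with an internal status of
 * (EVENT_STATUS_FTRACE | EVENT_STATUS_PERF); see update_enable_bit_for().
 */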
51 
52 /*
53  * Stores the system name, tables, and locks for a group of events. This
54  * allows isolation for events by various means.
55  */
56 struct user_event_group {
57 	char		*system_name;
58 	struct		hlist_node node;
59 	struct		mutex reg_mutex;
60 	DECLARE_HASHTABLE(register_table, 8);
61 };
62 
63 /* Group for init_user_ns mapping, top-most group */
64 static struct user_event_group *init_group;
65 
66 /* Max allowed events for the whole system */
67 static unsigned int max_user_events = 32768;
68 
69 /* Current number of events on the whole system */
70 static unsigned int current_user_events;
71 
72 /*
73  * Stores per-event properties. As users register events
74  * within a file, a user_event might be created if it does not
75  * already exist. These are globally used and their lifetime
76  * is tied to the refcnt member. They cannot go away until the
77  * refcnt reaches one (i.e. only the self reference remains).
78  */
79 struct user_event {
80 	struct user_event_group		*group;
81 	struct tracepoint		tracepoint;
82 	struct trace_event_call		call;
83 	struct trace_event_class	class;
84 	struct dyn_event		devent;
85 	struct hlist_node		node;
86 	struct list_head		fields;
87 	struct list_head		validators;
88 	refcount_t			refcnt;
89 	int				min_size;
90 	char				status;
91 };
92 
93 /*
94  * Stores per-mm/event properties that enable an address to be
95  * updated properly for each task. As tasks are forked, we use
96  * these to track enablement sites that are tied to an event.
97  */
98 struct user_event_enabler {
99 	struct list_head	mm_enablers_link;
100 	struct user_event	*event;
101 	unsigned long		addr;
102 
103 	/* Track enable bit, flags, etc. Aligned for bitops. */
104 	unsigned long		values;
105 };
106 
107 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
108 #define ENABLE_VAL_BIT_MASK 0x3F
109 
110 /* Bit 6 is for faulting status of enablement */
111 #define ENABLE_VAL_FAULTING_BIT 6
112 
113 /* Bit 7 is for freeing status of enablement */
114 #define ENABLE_VAL_FREEING_BIT 7
115 
116 /* Only duplicate the bit value */
117 #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
118 
119 #define ENABLE_BITOPS(e) (&(e)->values)
120 
121 #define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
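/*
 * Example: an enabler with values == 0x45 targets user bit 5
 * (0x45 & ENABLE_VAL_BIT_MASK) and currently has the faulting status
 * bit (ENABLE_VAL_FAULTING_BIT) set.
 */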
122 
123 /* Used for asynchronous faulting in of pages */
124 struct user_event_enabler_fault {
125 	struct work_struct		work;
126 	struct user_event_mm		*mm;
127 	struct user_event_enabler	*enabler;
128 	int				attempt;
129 };
130 
131 static struct kmem_cache *fault_cache;
132 
133 /* Global list of memory descriptors using user_events */
134 static LIST_HEAD(user_event_mms);
135 static DEFINE_SPINLOCK(user_event_mms_lock);
136 
137 /*
138  * Stores per-file event references. As users register events
139  * within a file, this structure is modified and freed via RCU.
140  * The lifetime of this struct is tied to the lifetime of the file.
141  * These are not shared and are only accessible by the file that created them.
142  */
143 struct user_event_refs {
144 	struct rcu_head		rcu;
145 	int			count;
146 	struct user_event	*events[];
147 };
148 
149 struct user_event_file_info {
150 	struct user_event_group	*group;
151 	struct user_event_refs	*refs;
152 };
153 
154 #define VALIDATOR_ENSURE_NULL (1 << 0)
155 #define VALIDATOR_REL (1 << 1)
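/*
 * Validators are attached to __data_loc/__rel_loc fields so that the
 * dynamic offset/size pair (and NULL termination for strings) can be
 * checked before a payload is committed; see user_event_validate().
 */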
156 
157 struct user_event_validator {
158 	struct list_head	user_event_link;
159 	int			offset;
160 	int			flags;
161 };
162 
163 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
164 				   void *tpdata, bool *faulted);
165 
166 static int user_event_parse(struct user_event_group *group, char *name,
167 			    char *args, char *flags,
168 			    struct user_event **newuser);
169 
170 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
171 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
172 static void user_event_mm_put(struct user_event_mm *mm);
173 
174 static u32 user_event_key(char *name)
175 {
176 	return jhash(name, strlen(name), 0);
177 }
178 
179 static void user_event_group_destroy(struct user_event_group *group)
180 {
181 	kfree(group->system_name);
182 	kfree(group);
183 }
184 
185 static char *user_event_group_system_name(struct user_namespace *user_ns)
186 {
187 	char *system_name;
188 	int len = sizeof(USER_EVENTS_SYSTEM) + 1;
189 
190 	if (user_ns != &init_user_ns) {
191 		/*
192 		 * Unexpected at this point:
193 		 * We currently only support init_user_ns.
194 		 * When we enable more, this path will trigger a failure, so log it.
195 		 */
196 		pr_warn("user_events: Namespace other than init_user_ns!\n");
197 		return NULL;
198 	}
199 
200 	system_name = kmalloc(len, GFP_KERNEL);
201 
202 	if (!system_name)
203 		return NULL;
204 
205 	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
206 
207 	return system_name;
208 }
209 
210 static inline struct user_event_group
211 *user_event_group_from_user_ns(struct user_namespace *user_ns)
212 {
213 	if (user_ns == &init_user_ns)
214 		return init_group;
215 
216 	return NULL;
217 }
218 
219 static struct user_event_group *current_user_event_group(void)
220 {
221 	struct user_namespace *user_ns = current_user_ns();
222 	struct user_event_group *group = NULL;
223 
224 	while (user_ns) {
225 		group = user_event_group_from_user_ns(user_ns);
226 
227 		if (group)
228 			break;
229 
230 		user_ns = user_ns->parent;
231 	}
232 
233 	return group;
234 }
235 
236 static struct user_event_group
237 *user_event_group_create(struct user_namespace *user_ns)
238 {
239 	struct user_event_group *group;
240 
241 	group = kzalloc(sizeof(*group), GFP_KERNEL);
242 
243 	if (!group)
244 		return NULL;
245 
246 	group->system_name = user_event_group_system_name(user_ns);
247 
248 	if (!group->system_name)
249 		goto error;
250 
251 	mutex_init(&group->reg_mutex);
252 	hash_init(group->register_table);
253 
254 	return group;
255 error:
256 	if (group)
257 		user_event_group_destroy(group);
258 
259 	return NULL;
260 }
261 
262 static void user_event_enabler_destroy(struct user_event_enabler *enabler)
263 {
264 	list_del_rcu(&enabler->mm_enablers_link);
265 
266 	/* No longer tracking the event via the enabler */
267 	refcount_dec(&enabler->event->refcnt);
268 
269 	kfree(enabler);
270 }
271 
272 static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
273 				  int attempt)
274 {
275 	bool unlocked;
276 	int ret;
277 
278 	/*
279 	 * Normally the attempt count is low; ensure it cannot be abused by
280 	 * bad user processes to cause excessive looping.
281 	 */
282 	if (attempt > 10)
283 		return -EFAULT;
284 
285 	mmap_read_lock(mm->mm);
286 
287 	/* Ensure MM has tasks, cannot use after exit_mm() */
288 	if (refcount_read(&mm->tasks) == 0) {
289 		ret = -ENOENT;
290 		goto out;
291 	}
292 
293 	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
294 			       &unlocked);
295 out:
296 	mmap_read_unlock(mm->mm);
297 
298 	return ret;
299 }
300 
301 static int user_event_enabler_write(struct user_event_mm *mm,
302 				    struct user_event_enabler *enabler,
303 				    bool fixup_fault, int *attempt);
304 
305 static void user_event_enabler_fault_fixup(struct work_struct *work)
306 {
307 	struct user_event_enabler_fault *fault = container_of(
308 		work, struct user_event_enabler_fault, work);
309 	struct user_event_enabler *enabler = fault->enabler;
310 	struct user_event_mm *mm = fault->mm;
311 	unsigned long uaddr = enabler->addr;
312 	int attempt = fault->attempt;
313 	int ret;
314 
315 	ret = user_event_mm_fault_in(mm, uaddr, attempt);
316 
317 	if (ret && ret != -ENOENT) {
318 		struct user_event *user = enabler->event;
319 
320 		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
321 			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
322 	}
323 
324 	/* Prevent state changes from racing */
325 	mutex_lock(&event_mutex);
326 
327 	/* User asked for enabler to be removed during fault */
328 	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
329 		user_event_enabler_destroy(enabler);
330 		goto out;
331 	}
332 
333 	/*
334 	 * If we managed to get the page, re-issue the write. We do not
335 	 * want to get into a possible infinite loop, which is why we only
336 	 * attempt again directly if the page came in. If we couldn't get
337 	 * the page here, then we will try again the next time the event is
338 	 * enabled/disabled.
339 	 */
340 	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
341 
342 	if (!ret) {
343 		mmap_read_lock(mm->mm);
344 		user_event_enabler_write(mm, enabler, true, &attempt);
345 		mmap_read_unlock(mm->mm);
346 	}
347 out:
348 	mutex_unlock(&event_mutex);
349 
350 	/* In all cases we no longer need the mm or fault */
351 	user_event_mm_put(mm);
352 	kmem_cache_free(fault_cache, fault);
353 }
354 
355 static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
356 					   struct user_event_enabler *enabler,
357 					   int attempt)
358 {
359 	struct user_event_enabler_fault *fault;
360 
361 	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
362 
363 	if (!fault)
364 		return false;
365 
366 	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
367 	fault->mm = user_event_mm_get(mm);
368 	fault->enabler = enabler;
369 	fault->attempt = attempt;
370 
371 	/* Don't try to queue in again while we have a pending fault */
372 	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
373 
374 	if (!schedule_work(&fault->work)) {
375 		/* Allow another attempt later */
376 		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
377 
378 		user_event_mm_put(mm);
379 		kmem_cache_free(fault_cache, fault);
380 
381 		return false;
382 	}
383 
384 	return true;
385 }
386 
387 static int user_event_enabler_write(struct user_event_mm *mm,
388 				    struct user_event_enabler *enabler,
389 				    bool fixup_fault, int *attempt)
390 {
391 	unsigned long uaddr = enabler->addr;
392 	unsigned long *ptr;
393 	struct page *page;
394 	void *kaddr;
395 	int ret;
396 
397 	lockdep_assert_held(&event_mutex);
398 	mmap_assert_locked(mm->mm);
399 
400 	*attempt += 1;
401 
402 	/* Ensure MM has tasks, cannot use after exit_mm() */
403 	if (refcount_read(&mm->tasks) == 0)
404 		return -ENOENT;
405 
406 	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
407 		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
408 		return -EBUSY;
409 
410 	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
411 				    &page, NULL, NULL);
412 
413 	if (unlikely(ret <= 0)) {
414 		if (!fixup_fault)
415 			return -EFAULT;
416 
417 		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
418 			pr_warn("user_events: Unable to queue fault handler\n");
419 
420 		return -EFAULT;
421 	}
422 
423 	kaddr = kmap_local_page(page);
424 	ptr = kaddr + (uaddr & ~PAGE_MASK);
425 
426 	/* Update bit atomically, user tracers must be atomic as well */
427 	if (enabler->event && enabler->event->status)
428 		set_bit(ENABLE_BIT(enabler), ptr);
429 	else
430 		clear_bit(ENABLE_BIT(enabler), ptr);
431 
432 	kunmap_local(kaddr);
433 	unpin_user_pages_dirty_lock(&page, 1, true);
434 
435 	return 0;
436 }
437 
438 static bool user_event_enabler_exists(struct user_event_mm *mm,
439 				      unsigned long uaddr, unsigned char bit)
440 {
441 	struct user_event_enabler *enabler;
442 
443 	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
444 		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
445 			return true;
446 	}
447 
448 	return false;
449 }
450 
451 static void user_event_enabler_update(struct user_event *user)
452 {
453 	struct user_event_enabler *enabler;
454 	struct user_event_mm *next;
455 	struct user_event_mm *mm;
456 	int attempt;
457 
458 	lockdep_assert_held(&event_mutex);
459 
460 	/*
461 	 * We need to build a one-shot list of all the mms that have an
462 	 * enabler for the user_event passed in. This list is only valid
463 	 * while holding the event_mutex. The only reason for this is that
464 	 * the global mm list is RCU protected and we use methods
465 	 * which can wait (mmap_read_lock and pin_user_pages_remote).
466 	 *
467 	 * NOTE: user_event_mm_get_all() increments the ref count of each
468 	 * mm that is added to the list to prevent removal timing windows.
469 	 * We must always put each mm after they are used, which may wait.
470 	 */
471 	mm = user_event_mm_get_all(user);
472 
473 	while (mm) {
474 		next = mm->next;
475 		mmap_read_lock(mm->mm);
476 
477 		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
478 			if (enabler->event == user) {
479 				attempt = 0;
480 				user_event_enabler_write(mm, enabler, true, &attempt);
481 			}
482 		}
483 
484 		mmap_read_unlock(mm->mm);
485 		user_event_mm_put(mm);
486 		mm = next;
487 	}
488 }
489 
490 static bool user_event_enabler_dup(struct user_event_enabler *orig,
491 				   struct user_event_mm *mm)
492 {
493 	struct user_event_enabler *enabler;
494 
495 	/* Skip pending frees */
496 	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
497 		return true;
498 
499 	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
500 
501 	if (!enabler)
502 		return false;
503 
504 	enabler->event = orig->event;
505 	enabler->addr = orig->addr;
506 
507 	/* Only dup part of value (ignore future flags, etc) */
508 	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
509 
510 	refcount_inc(&enabler->event->refcnt);
511 
512 	/* Enablers not exposed yet, RCU not required */
513 	list_add(&enabler->mm_enablers_link, &mm->enablers);
514 
515 	return true;
516 }
517 
518 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
519 {
520 	refcount_inc(&mm->refcnt);
521 
522 	return mm;
523 }
524 
525 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
526 {
527 	struct user_event_mm *found = NULL;
528 	struct user_event_enabler *enabler;
529 	struct user_event_mm *mm;
530 
531 	/*
532 	 * We use the mm->next field to build a one-shot list from the global
533 	 * RCU protected list. To build this list the event_mutex must be held.
534 	 * This lets us build a list without requiring allocs that could fail
535 	 * when user based events are most wanted for diagnostics.
536 	 */
537 	lockdep_assert_held(&event_mutex);
538 
539 	/*
540 	 * We do not want to block fork/exec while enablements are being
541 	 * updated, so we use RCU to walk the current tasks that have used
542 	 * user_events ABI for 1 or more events. Each enabler found in each
543 	 * task that matches the event being updated has a write to reflect
544 	 * the kernel state back into the process. Waits/faults must not occur
545 	 * during this. So we scan the list under RCU for all the mm that have
546 	 * the event within it. This is needed because mmap_read_lock() can wait.
547 	 * Each user mm returned has a ref inc to handle remove RCU races.
548 	 */
549 	rcu_read_lock();
550 
551 	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
552 		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
553 			if (enabler->event == user) {
554 				mm->next = found;
555 				found = user_event_mm_get(mm);
556 				break;
557 			}
558 		}
559 	}
560 
561 	rcu_read_unlock();
562 
563 	return found;
564 }
565 
566 static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
567 {
568 	struct user_event_mm *user_mm;
569 
570 	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
571 
572 	if (!user_mm)
573 		return NULL;
574 
575 	user_mm->mm = t->mm;
576 	INIT_LIST_HEAD(&user_mm->enablers);
577 	refcount_set(&user_mm->refcnt, 1);
578 	refcount_set(&user_mm->tasks, 1);
579 
580 	/*
581 	 * The lifetime of the memory descriptor can slightly outlast
582 	 * the task lifetime if a ref to the user_event_mm is taken
583 	 * between list_del_rcu() and call_rcu(). Therefore we need
584 	 * to take a reference to it to ensure it can live this long
585 	 * under this corner case. This can also occur in clones that
586 	 * outlast the parent.
587 	 */
588 	mmgrab(user_mm->mm);
589 
590 	return user_mm;
591 }
592 
593 static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
594 {
595 	unsigned long flags;
596 
597 	spin_lock_irqsave(&user_event_mms_lock, flags);
598 	list_add_rcu(&user_mm->mms_link, &user_event_mms);
599 	spin_unlock_irqrestore(&user_event_mms_lock, flags);
600 
601 	t->user_event_mm = user_mm;
602 }
603 
604 static struct user_event_mm *current_user_event_mm(void)
605 {
606 	struct user_event_mm *user_mm = current->user_event_mm;
607 
608 	if (user_mm)
609 		goto inc;
610 
611 	user_mm = user_event_mm_alloc(current);
612 
613 	if (!user_mm)
614 		goto error;
615 
616 	user_event_mm_attach(user_mm, current);
617 inc:
618 	refcount_inc(&user_mm->refcnt);
619 error:
620 	return user_mm;
621 }
622 
623 static void user_event_mm_destroy(struct user_event_mm *mm)
624 {
625 	struct user_event_enabler *enabler, *next;
626 
627 	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
628 		user_event_enabler_destroy(enabler);
629 
630 	mmdrop(mm->mm);
631 	kfree(mm);
632 }
633 
634 static void user_event_mm_put(struct user_event_mm *mm)
635 {
636 	if (mm && refcount_dec_and_test(&mm->refcnt))
637 		user_event_mm_destroy(mm);
638 }
639 
640 static void delayed_user_event_mm_put(struct work_struct *work)
641 {
642 	struct user_event_mm *mm;
643 
644 	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
645 	user_event_mm_put(mm);
646 }
647 
648 void user_event_mm_remove(struct task_struct *t)
649 {
650 	struct user_event_mm *mm;
651 	unsigned long flags;
652 
653 	might_sleep();
654 
655 	mm = t->user_event_mm;
656 	t->user_event_mm = NULL;
657 
658 	/* Clone will increment the tasks, only remove if last clone */
659 	if (!refcount_dec_and_test(&mm->tasks))
660 		return;
661 
662 	/* Remove the mm from the list, so it can no longer be enabled */
663 	spin_lock_irqsave(&user_event_mms_lock, flags);
664 	list_del_rcu(&mm->mms_link);
665 	spin_unlock_irqrestore(&user_event_mms_lock, flags);
666 
667 	/*
668 	 * We need to wait for currently occurring writes to stop within
669 	 * the mm. This is required since exit_mm() snaps the current rss
670 	 * stats and clears them. On the final mmdrop(), check_mm() will
671 	 * report a bug if these increment.
672 	 *
673 	 * All writes/pins are done under mmap_read lock, take the write
674 	 * lock to ensure in-progress faults have completed. Faults that
675 	 * are pending but yet to run will check the task count and skip
676 	 * the fault since the mm is going away.
677 	 */
678 	mmap_write_lock(mm->mm);
679 	mmap_write_unlock(mm->mm);
680 
681 	/*
682 	 * Put for mm must be done after RCU delay to handle new refs in
683 	 * between the list_del_rcu() and now. This ensures any get refs
684 	 * during rcu_read_lock() are accounted for during list removal.
685 	 *
686 	 * CPU A			|	CPU B
687 	 * ---------------------------------------------------------------
688 	 * user_event_mm_remove()	|	rcu_read_lock();
689 	 * list_del_rcu()		|	list_for_each_entry_rcu();
690 	 * call_rcu()			|	refcount_inc();
691 	 * .				|	rcu_read_unlock();
692 	 * schedule_work()		|	.
693 	 * user_event_mm_put()		|	.
694 	 *
695 	 * mmdrop() cannot be called in the softirq context of call_rcu()
696 	 * so we use a work queue after call_rcu() to run within.
697 	 */
698 	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
699 	queue_rcu_work(system_wq, &mm->put_rwork);
700 }
701 
702 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
703 {
704 	struct user_event_mm *mm = user_event_mm_alloc(t);
705 	struct user_event_enabler *enabler;
706 
707 	if (!mm)
708 		return;
709 
710 	rcu_read_lock();
711 
712 	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
713 		if (!user_event_enabler_dup(enabler, mm))
714 			goto error;
715 	}
716 
717 	rcu_read_unlock();
718 
719 	user_event_mm_attach(mm, t);
720 	return;
721 error:
722 	rcu_read_unlock();
723 	user_event_mm_destroy(mm);
724 }
725 
726 static bool current_user_event_enabler_exists(unsigned long uaddr,
727 					      unsigned char bit)
728 {
729 	struct user_event_mm *user_mm = current_user_event_mm();
730 	bool exists;
731 
732 	if (!user_mm)
733 		return false;
734 
735 	exists = user_event_enabler_exists(user_mm, uaddr, bit);
736 
737 	user_event_mm_put(user_mm);
738 
739 	return exists;
740 }
741 
742 static struct user_event_enabler
743 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
744 			   int *write_result)
745 {
746 	struct user_event_enabler *enabler;
747 	struct user_event_mm *user_mm;
748 	unsigned long uaddr = (unsigned long)reg->enable_addr;
749 	int attempt = 0;
750 
751 	user_mm = current_user_event_mm();
752 
753 	if (!user_mm)
754 		return NULL;
755 
756 	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
757 
758 	if (!enabler)
759 		goto out;
760 
761 	enabler->event = user;
762 	enabler->addr = uaddr;
763 	enabler->values = reg->enable_bit;
764 retry:
765 	/* Prevents state changes from racing with new enablers */
766 	mutex_lock(&event_mutex);
767 
768 	/* Attempt to reflect the current state within the process */
769 	mmap_read_lock(user_mm->mm);
770 	*write_result = user_event_enabler_write(user_mm, enabler, false,
771 						 &attempt);
772 	mmap_read_unlock(user_mm->mm);
773 
774 	/*
775 	 * If the write works, then we will track the enabler. A ref to the
776 	 * underlying user_event is held by the enabler to prevent it going
777 	 * away while the enabler is still in use by a process. The ref is
778 	 * removed when the enabler is destroyed. This means an event cannot
779 	 * be forcefully deleted from the system until all tasks using it
780 	 * exit or run exec(), which includes forks and clones.
781 	 */
782 	if (!*write_result) {
783 		refcount_inc(&enabler->event->refcnt);
784 		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
785 	}
786 
787 	mutex_unlock(&event_mutex);
788 
789 	if (*write_result) {
790 		/* Attempt to fault-in and retry if it worked */
791 		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
792 			goto retry;
793 
794 		kfree(enabler);
795 		enabler = NULL;
796 	}
797 out:
798 	user_event_mm_put(user_mm);
799 
800 	return enabler;
801 }
802 
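/*
 * A registered event keeps a self reference in addition to any caller
 * references (see the refcount_set() to 2 in user_event_parse()), so a
 * refcnt of exactly one means only the self reference remains and the
 * event may be deleted.
 */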
803 static __always_inline __must_check
804 bool user_event_last_ref(struct user_event *user)
805 {
806 	return refcount_read(&user->refcnt) == 1;
807 }
808 
809 static __always_inline __must_check
810 size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
811 {
812 	size_t ret;
813 
814 	pagefault_disable();
815 
816 	ret = copy_from_iter_nocache(addr, bytes, i);
817 
818 	pagefault_enable();
819 
820 	return ret;
821 }
822 
823 static struct list_head *user_event_get_fields(struct trace_event_call *call)
824 {
825 	struct user_event *user = (struct user_event *)call->data;
826 
827 	return &user->fields;
828 }
829 
830 /*
831  * Parses a register command for user_events
832  * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
833  *
834  * Example: an event named 'test' with a 20 char 'msg' field followed by an
835  * unsigned int 'id' field:
836  * test char[20] msg;unsigned int id
837  *
838  * NOTE: Offsets are from the user data perspective; they are not from the
839  * trace_entry/buffer perspective. We automatically add the common properties
840  * sizes to the offset for the user.
841  *
842  * Upon success user_event has its ref count increased by 1.
843  */
844 static int user_event_parse_cmd(struct user_event_group *group,
845 				char *raw_command, struct user_event **newuser)
846 {
847 	char *name = raw_command;
848 	char *args = strpbrk(name, " ");
849 	char *flags;
850 
851 	if (args)
852 		*args++ = '\0';
853 
854 	flags = strpbrk(name, ":");
855 
856 	if (flags)
857 		*flags++ = '\0';
858 
859 	return user_event_parse(group, name, args, flags, newuser);
860 }
861 
862 static int user_field_array_size(const char *type)
863 {
864 	const char *start = strchr(type, '[');
865 	char val[8];
866 	char *bracket;
867 	int size = 0;
868 
869 	if (start == NULL)
870 		return -EINVAL;
871 
872 	if (strscpy(val, start + 1, sizeof(val)) <= 0)
873 		return -EINVAL;
874 
875 	bracket = strchr(val, ']');
876 
877 	if (!bracket)
878 		return -EINVAL;
879 
880 	*bracket = '\0';
881 
882 	if (kstrtouint(val, 0, &size))
883 		return -EINVAL;
884 
885 	if (size > MAX_FIELD_ARRAY_SIZE)
886 		return -EINVAL;
887 
888 	return size;
889 }
890 
891 static int user_field_size(const char *type)
892 {
893 	/* long is not allowed from a user, since its size is ambiguous */
894 	if (strcmp(type, "s64") == 0)
895 		return sizeof(s64);
896 	if (strcmp(type, "u64") == 0)
897 		return sizeof(u64);
898 	if (strcmp(type, "s32") == 0)
899 		return sizeof(s32);
900 	if (strcmp(type, "u32") == 0)
901 		return sizeof(u32);
902 	if (strcmp(type, "int") == 0)
903 		return sizeof(int);
904 	if (strcmp(type, "unsigned int") == 0)
905 		return sizeof(unsigned int);
906 	if (strcmp(type, "s16") == 0)
907 		return sizeof(s16);
908 	if (strcmp(type, "u16") == 0)
909 		return sizeof(u16);
910 	if (strcmp(type, "short") == 0)
911 		return sizeof(short);
912 	if (strcmp(type, "unsigned short") == 0)
913 		return sizeof(unsigned short);
914 	if (strcmp(type, "s8") == 0)
915 		return sizeof(s8);
916 	if (strcmp(type, "u8") == 0)
917 		return sizeof(u8);
918 	if (strcmp(type, "char") == 0)
919 		return sizeof(char);
920 	if (strcmp(type, "unsigned char") == 0)
921 		return sizeof(unsigned char);
922 	if (str_has_prefix(type, "char["))
923 		return user_field_array_size(type);
924 	if (str_has_prefix(type, "unsigned char["))
925 		return user_field_array_size(type);
926 	if (str_has_prefix(type, "__data_loc "))
927 		return sizeof(u32);
928 	if (str_has_prefix(type, "__rel_loc "))
929 		return sizeof(u32);
930 
931 	/* Unknown basic type, error */
932 	return -EINVAL;
933 }
934 
935 static void user_event_destroy_validators(struct user_event *user)
936 {
937 	struct user_event_validator *validator, *next;
938 	struct list_head *head = &user->validators;
939 
940 	list_for_each_entry_safe(validator, next, head, user_event_link) {
941 		list_del(&validator->user_event_link);
942 		kfree(validator);
943 	}
944 }
945 
946 static void user_event_destroy_fields(struct user_event *user)
947 {
948 	struct ftrace_event_field *field, *next;
949 	struct list_head *head = &user->fields;
950 
951 	list_for_each_entry_safe(field, next, head, link) {
952 		list_del(&field->link);
953 		kfree(field);
954 	}
955 }
956 
957 static int user_event_add_field(struct user_event *user, const char *type,
958 				const char *name, int offset, int size,
959 				int is_signed, int filter_type)
960 {
961 	struct user_event_validator *validator;
962 	struct ftrace_event_field *field;
963 	int validator_flags = 0;
964 
965 	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
966 
967 	if (!field)
968 		return -ENOMEM;
969 
970 	if (str_has_prefix(type, "__data_loc "))
971 		goto add_validator;
972 
973 	if (str_has_prefix(type, "__rel_loc ")) {
974 		validator_flags |= VALIDATOR_REL;
975 		goto add_validator;
976 	}
977 
978 	goto add_field;
979 
980 add_validator:
981 	if (strstr(type, "char") != NULL)
982 		validator_flags |= VALIDATOR_ENSURE_NULL;
983 
984 	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
985 
986 	if (!validator) {
987 		kfree(field);
988 		return -ENOMEM;
989 	}
990 
991 	validator->flags = validator_flags;
992 	validator->offset = offset;
993 
994 	/* Want sequential access when validating */
995 	list_add_tail(&validator->user_event_link, &user->validators);
996 
997 add_field:
998 	field->type = type;
999 	field->name = name;
1000 	field->offset = offset;
1001 	field->size = size;
1002 	field->is_signed = is_signed;
1003 	field->filter_type = filter_type;
1004 
1005 	if (filter_type == FILTER_OTHER)
1006 		field->filter_type = filter_assign_type(type);
1007 
1008 	list_add(&field->link, &user->fields);
1009 
1010 	/*
1011 	 * Min size required from user writes; this does not include
1012 	 * the size of trace_entry (common fields).
1013 	 */
1014 	user->min_size = (offset + size) - sizeof(struct trace_entry);
1015 
1016 	return 0;
1017 }
1018 
1019 /*
1020  * Parses the values of a field within the description
1021  * Format: type name [size]
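 *
 * Examples: "u32 count", "char[20] msg", "__data_loc char[] msg", or
 * "struct my_struct payload 32" (struct fields require an explicit size
 * as the trailing token; "my_struct" and "payload" are just placeholder
 * names).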
1022  */
1023 static int user_event_parse_field(char *field, struct user_event *user,
1024 				  u32 *offset)
1025 {
1026 	char *part, *type, *name;
1027 	u32 depth = 0, saved_offset = *offset;
1028 	int len, size = -EINVAL;
1029 	bool is_struct = false;
1030 
1031 	field = skip_spaces(field);
1032 
1033 	if (*field == '\0')
1034 		return 0;
1035 
1036 	/* Handle types that have a space within */
1037 	len = str_has_prefix(field, "unsigned ");
1038 	if (len)
1039 		goto skip_next;
1040 
1041 	len = str_has_prefix(field, "struct ");
1042 	if (len) {
1043 		is_struct = true;
1044 		goto skip_next;
1045 	}
1046 
1047 	len = str_has_prefix(field, "__data_loc unsigned ");
1048 	if (len)
1049 		goto skip_next;
1050 
1051 	len = str_has_prefix(field, "__data_loc ");
1052 	if (len)
1053 		goto skip_next;
1054 
1055 	len = str_has_prefix(field, "__rel_loc unsigned ");
1056 	if (len)
1057 		goto skip_next;
1058 
1059 	len = str_has_prefix(field, "__rel_loc ");
1060 	if (len)
1061 		goto skip_next;
1062 
1063 	goto parse;
1064 skip_next:
1065 	type = field;
1066 	field = strpbrk(field + len, " ");
1067 
1068 	if (field == NULL)
1069 		return -EINVAL;
1070 
1071 	*field++ = '\0';
1072 	depth++;
1073 parse:
1074 	name = NULL;
1075 
1076 	while ((part = strsep(&field, " ")) != NULL) {
1077 		switch (depth++) {
1078 		case FIELD_DEPTH_TYPE:
1079 			type = part;
1080 			break;
1081 		case FIELD_DEPTH_NAME:
1082 			name = part;
1083 			break;
1084 		case FIELD_DEPTH_SIZE:
1085 			if (!is_struct)
1086 				return -EINVAL;
1087 
1088 			if (kstrtou32(part, 10, &size))
1089 				return -EINVAL;
1090 			break;
1091 		default:
1092 			return -EINVAL;
1093 		}
1094 	}
1095 
1096 	if (depth < FIELD_DEPTH_SIZE || !name)
1097 		return -EINVAL;
1098 
1099 	if (depth == FIELD_DEPTH_SIZE)
1100 		size = user_field_size(type);
1101 
1102 	if (size == 0)
1103 		return -EINVAL;
1104 
1105 	if (size < 0)
1106 		return size;
1107 
1108 	*offset = saved_offset + size;
1109 
1110 	return user_event_add_field(user, type, name, saved_offset, size,
1111 				    type[0] != 'u', FILTER_OTHER);
1112 }
1113 
1114 static int user_event_parse_fields(struct user_event *user, char *args)
1115 {
1116 	char *field;
1117 	u32 offset = sizeof(struct trace_entry);
1118 	int ret = -EINVAL;
1119 
1120 	if (args == NULL)
1121 		return 0;
1122 
1123 	while ((field = strsep(&args, ";")) != NULL) {
1124 		ret = user_event_parse_field(field, user, &offset);
1125 
1126 		if (ret)
1127 			break;
1128 	}
1129 
1130 	return ret;
1131 }
1132 
1133 static struct trace_event_fields user_event_fields_array[1];
1134 
1135 static const char *user_field_format(const char *type)
1136 {
1137 	if (strcmp(type, "s64") == 0)
1138 		return "%lld";
1139 	if (strcmp(type, "u64") == 0)
1140 		return "%llu";
1141 	if (strcmp(type, "s32") == 0)
1142 		return "%d";
1143 	if (strcmp(type, "u32") == 0)
1144 		return "%u";
1145 	if (strcmp(type, "int") == 0)
1146 		return "%d";
1147 	if (strcmp(type, "unsigned int") == 0)
1148 		return "%u";
1149 	if (strcmp(type, "s16") == 0)
1150 		return "%d";
1151 	if (strcmp(type, "u16") == 0)
1152 		return "%u";
1153 	if (strcmp(type, "short") == 0)
1154 		return "%d";
1155 	if (strcmp(type, "unsigned short") == 0)
1156 		return "%u";
1157 	if (strcmp(type, "s8") == 0)
1158 		return "%d";
1159 	if (strcmp(type, "u8") == 0)
1160 		return "%u";
1161 	if (strcmp(type, "char") == 0)
1162 		return "%d";
1163 	if (strcmp(type, "unsigned char") == 0)
1164 		return "%u";
1165 	if (strstr(type, "char[") != NULL)
1166 		return "%s";
1167 
1168 	/* Unknown, likely a struct; allowed, treat as 64-bit */
1169 	return "%llu";
1170 }
1171 
1172 static bool user_field_is_dyn_string(const char *type, const char **str_func)
1173 {
1174 	if (str_has_prefix(type, "__data_loc ")) {
1175 		*str_func = "__get_str";
1176 		goto check;
1177 	}
1178 
1179 	if (str_has_prefix(type, "__rel_loc ")) {
1180 		*str_func = "__get_rel_str";
1181 		goto check;
1182 	}
1183 
1184 	return false;
1185 check:
1186 	return strstr(type, "char") != NULL;
1187 }
1188 
1189 #define LEN_OR_ZERO (len ? len - pos : 0)
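/*
 * The string helpers below are designed for two-pass use: call them first
 * with a NULL buffer and len of 0 to size the output, then again with an
 * allocated buffer to fill it (see user_event_create_print_fmt() and
 * user_field_match()).
 */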
1190 static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1191 				     char *buf, int len, bool *colon)
1192 {
1193 	int pos = 0, i = *iout;
1194 
1195 	*colon = false;
1196 
1197 	for (; i < argc; ++i) {
1198 		if (i != *iout)
1199 			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1200 
1201 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1202 
1203 		if (strchr(argv[i], ';')) {
1204 			++i;
1205 			*colon = true;
1206 			break;
1207 		}
1208 	}
1209 
1210 	/* Actual set, advance i */
1211 	if (len != 0)
1212 		*iout = i;
1213 
1214 	return pos + 1;
1215 }
1216 
1217 static int user_field_set_string(struct ftrace_event_field *field,
1218 				 char *buf, int len, bool colon)
1219 {
1220 	int pos = 0;
1221 
1222 	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1223 	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1224 	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1225 
1226 	if (colon)
1227 		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1228 
1229 	return pos + 1;
1230 }
1231 
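/*
 * As a concrete example, an event registered as
 * "test char[20] msg;unsigned int id" ends up with a print_fmt of:
 *   "\"msg=%s id=%u\", REC->msg, REC->id"
 */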
1232 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1233 {
1234 	struct ftrace_event_field *field, *next;
1235 	struct list_head *head = &user->fields;
1236 	int pos = 0, depth = 0;
1237 	const char *str_func;
1238 
1239 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1240 
1241 	list_for_each_entry_safe_reverse(field, next, head, link) {
1242 		if (depth != 0)
1243 			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1244 
1245 		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1246 				field->name, user_field_format(field->type));
1247 
1248 		depth++;
1249 	}
1250 
1251 	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1252 
1253 	list_for_each_entry_safe_reverse(field, next, head, link) {
1254 		if (user_field_is_dyn_string(field->type, &str_func))
1255 			pos += snprintf(buf + pos, LEN_OR_ZERO,
1256 					", %s(%s)", str_func, field->name);
1257 		else
1258 			pos += snprintf(buf + pos, LEN_OR_ZERO,
1259 					", REC->%s", field->name);
1260 	}
1261 
1262 	return pos + 1;
1263 }
1264 #undef LEN_OR_ZERO
1265 
1266 static int user_event_create_print_fmt(struct user_event *user)
1267 {
1268 	char *print_fmt;
1269 	int len;
1270 
1271 	len = user_event_set_print_fmt(user, NULL, 0);
1272 
1273 	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1274 
1275 	if (!print_fmt)
1276 		return -ENOMEM;
1277 
1278 	user_event_set_print_fmt(user, print_fmt, len);
1279 
1280 	user->call.print_fmt = print_fmt;
1281 
1282 	return 0;
1283 }
1284 
1285 static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1286 						int flags,
1287 						struct trace_event *event)
1288 {
1289 	return print_event_fields(iter, event);
1290 }
1291 
1292 static struct trace_event_functions user_event_funcs = {
1293 	.trace = user_event_print_trace,
1294 };
1295 
1296 static int user_event_set_call_visible(struct user_event *user, bool visible)
1297 {
1298 	int ret;
1299 	const struct cred *old_cred;
1300 	struct cred *cred;
1301 
1302 	cred = prepare_creds();
1303 
1304 	if (!cred)
1305 		return -ENOMEM;
1306 
1307 	/*
1308 	 * While by default tracefs is locked down, systems can be configured
1309 	 * to allow user_event files to be less locked down, the extreme case
1310 	 * being that "other" has read/write access to user_events_data/status.
1311 	 *
1312 	 * When not locked down, processes may not have permissions to
1313 	 * add/remove calls themselves to tracefs. We need to temporarily
1314 	 * switch to root file permission to allow for this scenario.
1315 	 */
1316 	cred->fsuid = GLOBAL_ROOT_UID;
1317 
1318 	old_cred = override_creds(cred);
1319 
1320 	if (visible)
1321 		ret = trace_add_event_call(&user->call);
1322 	else
1323 		ret = trace_remove_event_call(&user->call);
1324 
1325 	revert_creds(old_cred);
1326 	put_cred(cred);
1327 
1328 	return ret;
1329 }
1330 
1331 static int destroy_user_event(struct user_event *user)
1332 {
1333 	int ret = 0;
1334 
1335 	lockdep_assert_held(&event_mutex);
1336 
1337 	/* Must destroy fields before call removal */
1338 	user_event_destroy_fields(user);
1339 
1340 	ret = user_event_set_call_visible(user, false);
1341 
1342 	if (ret)
1343 		return ret;
1344 
1345 	dyn_event_remove(&user->devent);
1346 	hash_del(&user->node);
1347 
1348 	user_event_destroy_validators(user);
1349 	kfree(user->call.print_fmt);
1350 	kfree(EVENT_NAME(user));
1351 	kfree(user);
1352 
1353 	if (current_user_events > 0)
1354 		current_user_events--;
1355 	else
1356 		pr_alert("BUG: Bad current_user_events\n");
1357 
1358 	return ret;
1359 }
1360 
1361 static struct user_event *find_user_event(struct user_event_group *group,
1362 					  char *name, u32 *outkey)
1363 {
1364 	struct user_event *user;
1365 	u32 key = user_event_key(name);
1366 
1367 	*outkey = key;
1368 
1369 	hash_for_each_possible(group->register_table, user, node, key)
1370 		if (!strcmp(EVENT_NAME(user), name)) {
1371 			refcount_inc(&user->refcnt);
1372 			return user;
1373 		}
1374 
1375 	return NULL;
1376 }
1377 
1378 static int user_event_validate(struct user_event *user, void *data, int len)
1379 {
1380 	struct list_head *head = &user->validators;
1381 	struct user_event_validator *validator;
1382 	void *pos, *end = data + len;
1383 	u32 loc, offset, size;
1384 
1385 	list_for_each_entry(validator, head, user_event_link) {
1386 		pos = data + validator->offset;
1387 
1388 		/* Already done min_size check, no bounds check here */
1389 		loc = *(u32 *)pos;
1390 		offset = loc & 0xffff;
1391 		size = loc >> 16;
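		/*
		 * Example: loc == 0x000a0020 describes a 10 byte value at
		 * offset 0x20 (measured past the loc field itself when
		 * VALIDATOR_REL is set, otherwise from the start of data).
		 */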
1392 
1393 		if (likely(validator->flags & VALIDATOR_REL))
1394 			pos += offset + sizeof(loc);
1395 		else
1396 			pos = data + offset;
1397 
1398 		pos += size;
1399 
1400 		if (unlikely(pos > end))
1401 			return -EFAULT;
1402 
1403 		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1404 			if (unlikely(*(char *)(pos - 1) != '\0'))
1405 				return -EFAULT;
1406 	}
1407 
1408 	return 0;
1409 }
1410 
1411 /*
1412  * Writes the user supplied payload out to a trace file.
1413  */
1414 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1415 			      void *tpdata, bool *faulted)
1416 {
1417 	struct trace_event_file *file;
1418 	struct trace_entry *entry;
1419 	struct trace_event_buffer event_buffer;
1420 	size_t size = sizeof(*entry) + i->count;
1421 
1422 	file = (struct trace_event_file *)tpdata;
1423 
1424 	if (!file ||
1425 	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
1426 	    trace_trigger_soft_disabled(file))
1427 		return;
1428 
1429 	/* Allocates and fills trace_entry; entry + 1 is the data payload */
1430 	entry = trace_event_buffer_reserve(&event_buffer, file, size);
1431 
1432 	if (unlikely(!entry))
1433 		return;
1434 
1435 	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
1436 		goto discard;
1437 
1438 	if (!list_empty(&user->validators) &&
1439 	    unlikely(user_event_validate(user, entry, size)))
1440 		goto discard;
1441 
1442 	trace_event_buffer_commit(&event_buffer);
1443 
1444 	return;
1445 discard:
1446 	*faulted = true;
1447 	__trace_event_discard_commit(event_buffer.buffer,
1448 				     event_buffer.event);
1449 }
1450 
1451 #ifdef CONFIG_PERF_EVENTS
1452 /*
1453  * Writes the user supplied payload out to perf ring buffer.
1454  */
1455 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1456 			    void *tpdata, bool *faulted)
1457 {
1458 	struct hlist_head *perf_head;
1459 
1460 	perf_head = this_cpu_ptr(user->call.perf_events);
1461 
1462 	if (perf_head && !hlist_empty(perf_head)) {
1463 		struct trace_entry *perf_entry;
1464 		struct pt_regs *regs;
1465 		size_t size = sizeof(*perf_entry) + i->count;
1466 		int context;
1467 
1468 		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1469 						  &regs, &context);
1470 
1471 		if (unlikely(!perf_entry))
1472 			return;
1473 
1474 		perf_fetch_caller_regs(regs);
1475 
1476 		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
1477 			goto discard;
1478 
1479 		if (!list_empty(&user->validators) &&
1480 		    unlikely(user_event_validate(user, perf_entry, size)))
1481 			goto discard;
1482 
1483 		perf_trace_buf_submit(perf_entry, size, context,
1484 				      user->call.event.type, 1, regs,
1485 				      perf_head, NULL);
1486 
1487 		return;
1488 discard:
1489 		*faulted = true;
1490 		perf_swevent_put_recursion_context(context);
1491 	}
1492 }
1493 #endif
1494 
1495 /*
1496  * Update the enabled bit among all user processes.
1497  */
1498 static void update_enable_bit_for(struct user_event *user)
1499 {
1500 	struct tracepoint *tp = &user->tracepoint;
1501 	char status = 0;
1502 
1503 	if (atomic_read(&tp->key.enabled) > 0) {
1504 		struct tracepoint_func *probe_func_ptr;
1505 		user_event_func_t probe_func;
1506 
1507 		rcu_read_lock_sched();
1508 
1509 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
1510 
1511 		if (probe_func_ptr) {
1512 			do {
1513 				probe_func = probe_func_ptr->func;
1514 
1515 				if (probe_func == user_event_ftrace)
1516 					status |= EVENT_STATUS_FTRACE;
1517 #ifdef CONFIG_PERF_EVENTS
1518 				else if (probe_func == user_event_perf)
1519 					status |= EVENT_STATUS_PERF;
1520 #endif
1521 				else
1522 					status |= EVENT_STATUS_OTHER;
1523 			} while ((++probe_func_ptr)->func);
1524 		}
1525 
1526 		rcu_read_unlock_sched();
1527 	}
1528 
1529 	user->status = status;
1530 
1531 	user_event_enabler_update(user);
1532 }
1533 
1534 /*
1535  * Register callback for our events from tracing sub-systems.
1536  */
1537 static int user_event_reg(struct trace_event_call *call,
1538 			  enum trace_reg type,
1539 			  void *data)
1540 {
1541 	struct user_event *user = (struct user_event *)call->data;
1542 	int ret = 0;
1543 
1544 	if (!user)
1545 		return -ENOENT;
1546 
1547 	switch (type) {
1548 	case TRACE_REG_REGISTER:
1549 		ret = tracepoint_probe_register(call->tp,
1550 						call->class->probe,
1551 						data);
1552 		if (!ret)
1553 			goto inc;
1554 		break;
1555 
1556 	case TRACE_REG_UNREGISTER:
1557 		tracepoint_probe_unregister(call->tp,
1558 					    call->class->probe,
1559 					    data);
1560 		goto dec;
1561 
1562 #ifdef CONFIG_PERF_EVENTS
1563 	case TRACE_REG_PERF_REGISTER:
1564 		ret = tracepoint_probe_register(call->tp,
1565 						call->class->perf_probe,
1566 						data);
1567 		if (!ret)
1568 			goto inc;
1569 		break;
1570 
1571 	case TRACE_REG_PERF_UNREGISTER:
1572 		tracepoint_probe_unregister(call->tp,
1573 					    call->class->perf_probe,
1574 					    data);
1575 		goto dec;
1576 
1577 	case TRACE_REG_PERF_OPEN:
1578 	case TRACE_REG_PERF_CLOSE:
1579 	case TRACE_REG_PERF_ADD:
1580 	case TRACE_REG_PERF_DEL:
1581 		break;
1582 #endif
1583 	}
1584 
1585 	return ret;
1586 inc:
1587 	refcount_inc(&user->refcnt);
1588 	update_enable_bit_for(user);
1589 	return 0;
1590 dec:
1591 	update_enable_bit_for(user);
1592 	refcount_dec(&user->refcnt);
1593 	return 0;
1594 }
1595 
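/*
 * Handles create commands from the dynamic_events tracefs file. A minimal
 * usage sketch, assuming the usual "u:" USER_EVENTS_PREFIX:
 *
 *   echo 'u:test u32 count' >> /sys/kernel/tracing/dynamic_events
 */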
1596 static int user_event_create(const char *raw_command)
1597 {
1598 	struct user_event_group *group;
1599 	struct user_event *user;
1600 	char *name;
1601 	int ret;
1602 
1603 	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1604 		return -ECANCELED;
1605 
1606 	raw_command += USER_EVENTS_PREFIX_LEN;
1607 	raw_command = skip_spaces(raw_command);
1608 
1609 	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1610 
1611 	if (!name)
1612 		return -ENOMEM;
1613 
1614 	group = current_user_event_group();
1615 
1616 	if (!group) {
1617 		kfree(name);
1618 		return -ENOENT;
1619 	}
1620 
1621 	mutex_lock(&group->reg_mutex);
1622 
1623 	ret = user_event_parse_cmd(group, name, &user);
1624 
1625 	if (!ret)
1626 		refcount_dec(&user->refcnt);
1627 
1628 	mutex_unlock(&group->reg_mutex);
1629 
1630 	if (ret)
1631 		kfree(name);
1632 
1633 	return ret;
1634 }
1635 
1636 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1637 {
1638 	struct user_event *user = container_of(ev, struct user_event, devent);
1639 	struct ftrace_event_field *field, *next;
1640 	struct list_head *head;
1641 	int depth = 0;
1642 
1643 	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1644 
1645 	head = trace_get_fields(&user->call);
1646 
1647 	list_for_each_entry_safe_reverse(field, next, head, link) {
1648 		if (depth == 0)
1649 			seq_puts(m, " ");
1650 		else
1651 			seq_puts(m, "; ");
1652 
1653 		seq_printf(m, "%s %s", field->type, field->name);
1654 
1655 		if (str_has_prefix(field->type, "struct "))
1656 			seq_printf(m, " %d", field->size);
1657 
1658 		depth++;
1659 	}
1660 
1661 	seq_puts(m, "\n");
1662 
1663 	return 0;
1664 }
1665 
1666 static bool user_event_is_busy(struct dyn_event *ev)
1667 {
1668 	struct user_event *user = container_of(ev, struct user_event, devent);
1669 
1670 	return !user_event_last_ref(user);
1671 }
1672 
1673 static int user_event_free(struct dyn_event *ev)
1674 {
1675 	struct user_event *user = container_of(ev, struct user_event, devent);
1676 
1677 	if (!user_event_last_ref(user))
1678 		return -EBUSY;
1679 
1680 	return destroy_user_event(user);
1681 }
1682 
1683 static bool user_field_match(struct ftrace_event_field *field, int argc,
1684 			     const char **argv, int *iout)
1685 {
1686 	char *field_name = NULL, *dyn_field_name = NULL;
1687 	bool colon = false, match = false;
1688 	int dyn_len, len;
1689 
1690 	if (*iout >= argc)
1691 		return false;
1692 
1693 	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1694 					    0, &colon);
1695 
1696 	len = user_field_set_string(field, field_name, 0, colon);
1697 
1698 	if (dyn_len != len)
1699 		return false;
1700 
1701 	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1702 	field_name = kmalloc(len, GFP_KERNEL);
1703 
1704 	if (!dyn_field_name || !field_name)
1705 		goto out;
1706 
1707 	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1708 				  dyn_len, &colon);
1709 
1710 	user_field_set_string(field, field_name, len, colon);
1711 
1712 	match = strcmp(dyn_field_name, field_name) == 0;
1713 out:
1714 	kfree(dyn_field_name);
1715 	kfree(field_name);
1716 
1717 	return match;
1718 }
1719 
1720 static bool user_fields_match(struct user_event *user, int argc,
1721 			      const char **argv)
1722 {
1723 	struct ftrace_event_field *field, *next;
1724 	struct list_head *head = &user->fields;
1725 	int i = 0;
1726 
1727 	list_for_each_entry_safe_reverse(field, next, head, link)
1728 		if (!user_field_match(field, argc, argv, &i))
1729 			return false;
1730 
1731 	if (i != argc)
1732 		return false;
1733 
1734 	return true;
1735 }
1736 
1737 static bool user_event_match(const char *system, const char *event,
1738 			     int argc, const char **argv, struct dyn_event *ev)
1739 {
1740 	struct user_event *user = container_of(ev, struct user_event, devent);
1741 	bool match;
1742 
1743 	match = strcmp(EVENT_NAME(user), event) == 0 &&
1744 		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1745 
1746 	if (match && argc > 0)
1747 		match = user_fields_match(user, argc, argv);
1748 
1749 	return match;
1750 }
1751 
1752 static struct dyn_event_operations user_event_dops = {
1753 	.create = user_event_create,
1754 	.show = user_event_show,
1755 	.is_busy = user_event_is_busy,
1756 	.free = user_event_free,
1757 	.match = user_event_match,
1758 };
1759 
1760 static int user_event_trace_register(struct user_event *user)
1761 {
1762 	int ret;
1763 
1764 	ret = register_trace_event(&user->call.event);
1765 
1766 	if (!ret)
1767 		return -ENODEV;
1768 
1769 	ret = user_event_set_call_visible(user, true);
1770 
1771 	if (ret)
1772 		unregister_trace_event(&user->call.event);
1773 
1774 	return ret;
1775 }
1776 
1777 /*
1778  * Parses the event name, arguments and flags then registers if successful.
1779  * The name buffer lifetime is owned by this method for success cases only.
1780  * Upon success the returned user_event has its ref count increased by 1.
1781  */
1782 static int user_event_parse(struct user_event_group *group, char *name,
1783 			    char *args, char *flags,
1784 			    struct user_event **newuser)
1785 {
1786 	int ret;
1787 	u32 key;
1788 	struct user_event *user;
1789 
1790 	/* Prevent dyn_event from racing */
1791 	mutex_lock(&event_mutex);
1792 	user = find_user_event(group, name, &key);
1793 	mutex_unlock(&event_mutex);
1794 
1795 	if (user) {
1796 		*newuser = user;
1797 		/*
1798 		 * Name is allocated by caller, free it since it already exists.
1799 		 * Caller only worries about failure cases for freeing.
1800 		 */
1801 		kfree(name);
1802 		return 0;
1803 	}
1804 
1805 	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1806 
1807 	if (!user)
1808 		return -ENOMEM;
1809 
1810 	INIT_LIST_HEAD(&user->class.fields);
1811 	INIT_LIST_HEAD(&user->fields);
1812 	INIT_LIST_HEAD(&user->validators);
1813 
1814 	user->group = group;
1815 	user->tracepoint.name = name;
1816 
1817 	ret = user_event_parse_fields(user, args);
1818 
1819 	if (ret)
1820 		goto put_user;
1821 
1822 	ret = user_event_create_print_fmt(user);
1823 
1824 	if (ret)
1825 		goto put_user;
1826 
1827 	user->call.data = user;
1828 	user->call.class = &user->class;
1829 	user->call.name = name;
1830 	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
1831 	user->call.tp = &user->tracepoint;
1832 	user->call.event.funcs = &user_event_funcs;
1833 	user->class.system = group->system_name;
1834 
1835 	user->class.fields_array = user_event_fields_array;
1836 	user->class.get_fields = user_event_get_fields;
1837 	user->class.reg = user_event_reg;
1838 	user->class.probe = user_event_ftrace;
1839 #ifdef CONFIG_PERF_EVENTS
1840 	user->class.perf_probe = user_event_perf;
1841 #endif
1842 
1843 	mutex_lock(&event_mutex);
1844 
1845 	if (current_user_events >= max_user_events) {
1846 		ret = -EMFILE;
1847 		goto put_user_lock;
1848 	}
1849 
1850 	ret = user_event_trace_register(user);
1851 
1852 	if (ret)
1853 		goto put_user_lock;
1854 
1855 	/* Ensure we track self ref and caller ref (2) */
1856 	refcount_set(&user->refcnt, 2);
1857 
1858 	dyn_event_init(&user->devent, &user_event_dops);
1859 	dyn_event_add(&user->devent, &user->call);
1860 	hash_add(group->register_table, &user->node, key);
1861 	current_user_events++;
1862 
1863 	mutex_unlock(&event_mutex);
1864 
1865 	*newuser = user;
1866 	return 0;
1867 put_user_lock:
1868 	mutex_unlock(&event_mutex);
1869 put_user:
1870 	user_event_destroy_fields(user);
1871 	user_event_destroy_validators(user);
1872 	kfree(user->call.print_fmt);
1873 	kfree(user);
1874 	return ret;
1875 }
1876 
1877 /*
1878  * Deletes a previously created event if it is no longer being used.
1879  */
1880 static int delete_user_event(struct user_event_group *group, char *name)
1881 {
1882 	u32 key;
1883 	struct user_event *user = find_user_event(group, name, &key);
1884 
1885 	if (!user)
1886 		return -ENOENT;
1887 
1888 	refcount_dec(&user->refcnt);
1889 
1890 	if (!user_event_last_ref(user))
1891 		return -EBUSY;
1892 
1893 	return destroy_user_event(user);
1894 }
1895 
1896 /*
1897  * Validates the user payload and writes via iterator.
1898  */
1899 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
1900 {
1901 	struct user_event_file_info *info = file->private_data;
1902 	struct user_event_refs *refs;
1903 	struct user_event *user = NULL;
1904 	struct tracepoint *tp;
1905 	ssize_t ret = i->count;
1906 	int idx;
1907 
1908 	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
1909 		return -EFAULT;
1910 
1911 	if (idx < 0)
1912 		return -EINVAL;
1913 
1914 	rcu_read_lock_sched();
1915 
1916 	refs = rcu_dereference_sched(info->refs);
1917 
1918 	/*
1919 	 * The refs->events array is protected by RCU, and new items may be
1920 	 * added. But the user retrieved from indexing into the events array
1921 	 * shall be immutable while the file is opened.
1922 	 */
1923 	if (likely(refs && idx < refs->count))
1924 		user = refs->events[idx];
1925 
1926 	rcu_read_unlock_sched();
1927 
1928 	if (unlikely(user == NULL))
1929 		return -ENOENT;
1930 
1931 	if (unlikely(i->count < user->min_size))
1932 		return -EINVAL;
1933 
1934 	tp = &user->tracepoint;
1935 
1936 	/*
1937 	 * It's possible key.enabled becomes disabled after this check; however,
1938 	 * we don't mind if a few events slip through in that window.
1939 	 */
1940 	if (likely(atomic_read(&tp->key.enabled) > 0)) {
1941 		struct tracepoint_func *probe_func_ptr;
1942 		user_event_func_t probe_func;
1943 		struct iov_iter copy;
1944 		void *tpdata;
1945 		bool faulted;
1946 
1947 		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
1948 			return -EFAULT;
1949 
1950 		faulted = false;
1951 
1952 		rcu_read_lock_sched();
1953 
1954 		probe_func_ptr = rcu_dereference_sched(tp->funcs);
1955 
1956 		if (probe_func_ptr) {
1957 			do {
1958 				copy = *i;
1959 				probe_func = probe_func_ptr->func;
1960 				tpdata = probe_func_ptr->data;
1961 				probe_func(user, &copy, tpdata, &faulted);
1962 			} while ((++probe_func_ptr)->func);
1963 		}
1964 
1965 		rcu_read_unlock_sched();
1966 
1967 		if (unlikely(faulted))
1968 			return -EFAULT;
1969 	}
1970 
1971 	return ret;
1972 }
1973 
1974 static int user_events_open(struct inode *node, struct file *file)
1975 {
1976 	struct user_event_group *group;
1977 	struct user_event_file_info *info;
1978 
1979 	group = current_user_event_group();
1980 
1981 	if (!group)
1982 		return -ENOENT;
1983 
1984 	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
1985 
1986 	if (!info)
1987 		return -ENOMEM;
1988 
1989 	info->group = group;
1990 
1991 	file->private_data = info;
1992 
1993 	return 0;
1994 }
1995 
1996 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
1997 				 size_t count, loff_t *ppos)
1998 {
1999 	struct iovec iov;
2000 	struct iov_iter i;
2001 
2002 	if (unlikely(*ppos != 0))
2003 		return -EFAULT;
2004 
2005 	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
2006 					 count, &iov, &i)))
2007 		return -EFAULT;
2008 
2009 	return user_events_write_core(file, &i);
2010 }
2011 
2012 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
2013 {
2014 	return user_events_write_core(kp->ki_filp, i);
2015 }
2016 
2017 static int user_events_ref_add(struct user_event_file_info *info,
2018 			       struct user_event *user)
2019 {
2020 	struct user_event_group *group = info->group;
2021 	struct user_event_refs *refs, *new_refs;
2022 	int i, size, count = 0;
2023 
2024 	refs = rcu_dereference_protected(info->refs,
2025 					 lockdep_is_held(&group->reg_mutex));
2026 
2027 	if (refs) {
2028 		count = refs->count;
2029 
2030 		for (i = 0; i < count; ++i)
2031 			if (refs->events[i] == user)
2032 				return i;
2033 	}
2034 
2035 	size = struct_size(refs, events, count + 1);
2036 
2037 	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
2038 
2039 	if (!new_refs)
2040 		return -ENOMEM;
2041 
2042 	new_refs->count = count + 1;
2043 
2044 	for (i = 0; i < count; ++i)
2045 		new_refs->events[i] = refs->events[i];
2046 
2047 	new_refs->events[i] = user;
2048 
2049 	refcount_inc(&user->refcnt);
2050 
2051 	rcu_assign_pointer(info->refs, new_refs);
2052 
2053 	if (refs)
2054 		kfree_rcu(refs, rcu);
2055 
2056 	return i;
2057 }
2058 
2059 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
2060 {
2061 	u32 size;
2062 	long ret;
2063 
2064 	ret = get_user(size, &ureg->size);
2065 
2066 	if (ret)
2067 		return ret;
2068 
2069 	if (size > PAGE_SIZE)
2070 		return -E2BIG;
2071 
2072 	if (size < offsetofend(struct user_reg, write_index))
2073 		return -EINVAL;
2074 
2075 	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2076 
2077 	if (ret)
2078 		return ret;
2079 
2080 	/* Ensure no flags, since we don't support any yet */
2081 	if (kreg->flags != 0)
2082 		return -EINVAL;
2083 
2084 	/* Ensure supported size */
2085 	switch (kreg->enable_size) {
2086 	case 4:
2087 		/* 32-bit */
2088 		break;
2089 #if BITS_PER_LONG >= 64
2090 	case 8:
2091 		/* 64-bit */
2092 		break;
2093 #endif
2094 	default:
2095 		return -EINVAL;
2096 	}
2097 
2098 	/* Ensure natural alignment */
2099 	if (kreg->enable_addr % kreg->enable_size)
2100 		return -EINVAL;
2101 
2102 	/* Ensure bit range for size */
2103 	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2104 		return -EINVAL;
2105 
2106 	/* Ensure accessible */
2107 	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2108 		       kreg->enable_size))
2109 		return -EFAULT;
2110 
2111 	kreg->size = size;
2112 
2113 	return 0;
2114 }
2115 
2116 /*
2117  * Registers a user_event on behalf of a user process.
2118  */
2119 static long user_events_ioctl_reg(struct user_event_file_info *info,
2120 				  unsigned long uarg)
2121 {
2122 	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2123 	struct user_reg reg;
2124 	struct user_event *user;
2125 	struct user_event_enabler *enabler;
2126 	char *name;
2127 	long ret;
2128 	int write_result;
2129 
2130 	ret = user_reg_get(ureg, &reg);
2131 
2132 	if (ret)
2133 		return ret;
2134 
2135 	/*
2136 	 * Prevent users from using the same address and bit multiple times
2137 	 * within the same mm address space. Doing so can cause unexpected
2138 	 * behavior for user processes that is far easier to debug if it is
2139 	 * explicitly an error upon registering.
2140 	 */
2141 	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
2142 					      reg.enable_bit))
2143 		return -EADDRINUSE;
2144 
2145 	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2146 			    MAX_EVENT_DESC);
2147 
2148 	if (IS_ERR(name)) {
2149 		ret = PTR_ERR(name);
2150 		return ret;
2151 	}
2152 
2153 	ret = user_event_parse_cmd(info->group, name, &user);
2154 
2155 	if (ret) {
2156 		kfree(name);
2157 		return ret;
2158 	}
2159 
2160 	ret = user_events_ref_add(info, user);
2161 
2162 	/* No longer need the parse ref; ref_add either worked or it didn't */
2163 	refcount_dec(&user->refcnt);
2164 
2165 	/* A non-negative result is a valid index */
2166 	if (ret < 0)
2167 		return ret;
2168 
2169 	/*
2170 	 * user_events_ref_add succeeded:
2171 	 * At this point we have a user_event; its lifetime is bound by the
2172 	 * reference count, not this file. If anything fails, the user_event
2173 	 * still has a reference until the file is released. During release
2174 	 * any remaining references (from user_events_ref_add) are decremented.
2175 	 *
2176 	 * Attempt to create an enabler, whose lifetime is tied to the event
2177 	 * in the same way. Once the task that caused the enabler to be
2178 	 * created exits or issues exec(), the enablers it has created
2179 	 * will be destroyed and the ref to the event will be decremented.
2180 	 */
2181 	enabler = user_event_enabler_create(&reg, user, &write_result);
2182 
2183 	if (!enabler)
2184 		return -ENOMEM;
2185 
2186 	/* Write failed/faulted, give error back to caller */
2187 	if (write_result)
2188 		return write_result;
2189 
2190 	put_user((u32)ret, &ureg->write_index);
2191 
2192 	return 0;
2193 }
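
/*
 * Illustrative sketch, not part of this file: a minimal user-space
 * registration, assuming the uapi header <linux/user_events.h> provides
 * struct user_reg and DIAG_IOCSREG. "enabled" is the caller's own word;
 * the kernel flips the chosen bit in it when tracing turns the event on
 * or off. All names below are placeholders for the caller's own state:
 *
 *	static __u32 enabled;
 *	__u32 write_index;
 *
 *	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	struct user_reg reg = {
 *		.size = sizeof(reg),
 *		.enable_bit = 0,
 *		.enable_size = sizeof(enabled),
 *		.enable_addr = (__u64)(uintptr_t)&enabled,
 *		.name_args = (__u64)(uintptr_t)"test u32 count",
 *	};
 *
 *	if (ioctl(data_fd, DIAG_IOCSREG, &reg) == 0)
 *		write_index = reg.write_index;
 */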
2194 
2195 /*
2196  * Deletes a user_event on behalf of a user process.
2197  */
2198 static long user_events_ioctl_del(struct user_event_file_info *info,
2199 				  unsigned long uarg)
2200 {
2201 	void __user *ubuf = (void __user *)uarg;
2202 	char *name;
2203 	long ret;
2204 
2205 	name = strndup_user(ubuf, MAX_EVENT_DESC);
2206 
2207 	if (IS_ERR(name))
2208 		return PTR_ERR(name);
2209 
2210 	/* event_mutex prevents dyn_event from racing */
2211 	mutex_lock(&event_mutex);
2212 	ret = delete_user_event(info->group, name);
2213 	mutex_unlock(&event_mutex);
2214 
2215 	kfree(name);
2216 
2217 	return ret;
2218 }
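
/*
 * Illustrative sketch, not part of this file: deleting by name only needs
 * the event name string, using the data_fd from the registration sketch
 * above:
 *
 *	ioctl(data_fd, DIAG_IOCSDEL, "test");
 *
 * Deletion is refused while the event is still in use elsewhere.
 */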
2219 
2220 static long user_unreg_get(struct user_unreg __user *ureg,
2221 			   struct user_unreg *kreg)
2222 {
2223 	u32 size;
2224 	long ret;
2225 
2226 	ret = get_user(size, &ureg->size);
2227 
2228 	if (ret)
2229 		return ret;
2230 
2231 	if (size > PAGE_SIZE)
2232 		return -E2BIG;
2233 
2234 	if (size < offsetofend(struct user_unreg, disable_addr))
2235 		return -EINVAL;
2236 
2237 	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2238 
2239 	/* Ensure no reserved values, since we don't support any yet */
2240 	if (!ret && (kreg->__reserved || kreg->__reserved2))
2241 		return -EINVAL;
2242 
2243 	return ret;
2244 }
2245 
2246 static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
2247 				   unsigned long uaddr, unsigned char bit)
2248 {
2249 	struct user_event_enabler enabler;
2250 	int result;
2251 	int attempt = 0;
2252 
2253 	memset(&enabler, 0, sizeof(enabler));
2254 	enabler.addr = uaddr;
2255 	enabler.values = bit;
2256 retry:
2257 	/* Prevents state changes from racing with new enablers */
2258 	mutex_lock(&event_mutex);
2259 
2260 	/* Force the bit to be cleared, since no event is attached */
2261 	mmap_read_lock(user_mm->mm);
2262 	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
2263 	mmap_read_unlock(user_mm->mm);
2264 
2265 	mutex_unlock(&event_mutex);
2266 
2267 	if (result) {
2268 		/* Attempt to fault the page in and retry if that worked */
2269 		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
2270 			goto retry;
2271 	}
2272 
2273 	return result;
2274 }
2275 
2276 /*
2277  * Unregisters an enablement address/bit within a task/user mm.
2278  */
2279 static long user_events_ioctl_unreg(unsigned long uarg)
2280 {
2281 	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2282 	struct user_event_mm *mm = current->user_event_mm;
2283 	struct user_event_enabler *enabler, *next;
2284 	struct user_unreg reg;
2285 	long ret;
2286 
2287 	ret = user_unreg_get(ureg, &reg);
2288 
2289 	if (ret)
2290 		return ret;
2291 
2292 	if (!mm)
2293 		return -ENOENT;
2294 
2295 	ret = -ENOENT;
2296 
2297 	/*
2298 	 * The freeing and faulting flags indicate whether the enabler is in
2299 	 * use at all. When faulting is set, a page-fault is being handled
2300 	 * asynchronously; if freeing is also set, the enabler is destroyed
2301 	 * once that fault completes. If no async fault is in flight, we can
2302 	 * destroy it now since we hold the event_mutex during these checks.
2303 	 */
2304 	mutex_lock(&event_mutex);
2305 
2306 	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
2307 		if (enabler->addr == reg.disable_addr &&
2308 		    ENABLE_BIT(enabler) == reg.disable_bit) {
2309 			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2310 
2311 			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2312 				user_event_enabler_destroy(enabler);
2313 
2314 			/* Removed at least one */
2315 			ret = 0;
2316 		}
2317 	}
2318 
2319 	mutex_unlock(&event_mutex);
2320 
2321 	/* Ensure bit is now cleared for user, regardless of event status */
2322 	if (!ret)
2323 		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
2324 					      reg.disable_bit);
2325 
2326 	return ret;
2327 }
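
/*
 * Illustrative sketch, not part of this file: dropping the enablement from
 * the registration sketch above, assuming struct user_unreg and
 * DIAG_IOCSUNREG from <linux/user_events.h> and the same "enabled" word:
 *
 *	struct user_unreg unreg = {
 *		.size = sizeof(unreg),
 *		.disable_bit = 0,
 *		.disable_addr = (__u64)(uintptr_t)&enabled,
 *	};
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 *
 * On success the kernel also clears the bit in the caller's word so no
 * stale "enabled" state is left behind.
 */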
2328 
2329 /*
2330  * Handles the ioctl from user mode to register or alter operations.
2331  */
2332 static long user_events_ioctl(struct file *file, unsigned int cmd,
2333 			      unsigned long uarg)
2334 {
2335 	struct user_event_file_info *info = file->private_data;
2336 	struct user_event_group *group = info->group;
2337 	long ret = -ENOTTY;
2338 
2339 	switch (cmd) {
2340 	case DIAG_IOCSREG:
2341 		mutex_lock(&group->reg_mutex);
2342 		ret = user_events_ioctl_reg(info, uarg);
2343 		mutex_unlock(&group->reg_mutex);
2344 		break;
2345 
2346 	case DIAG_IOCSDEL:
2347 		mutex_lock(&group->reg_mutex);
2348 		ret = user_events_ioctl_del(info, uarg);
2349 		mutex_unlock(&group->reg_mutex);
2350 		break;
2351 
2352 	case DIAG_IOCSUNREG:
2353 		mutex_lock(&group->reg_mutex);
2354 		ret = user_events_ioctl_unreg(uarg);
2355 		mutex_unlock(&group->reg_mutex);
2356 		break;
2357 	}
2358 
2359 	return ret;
2360 }
2361 
2362 /*
2363  * Handles the final close of the file from user mode.
2364  */
2365 static int user_events_release(struct inode *node, struct file *file)
2366 {
2367 	struct user_event_file_info *info = file->private_data;
2368 	struct user_event_group *group;
2369 	struct user_event_refs *refs;
2370 	struct user_event *user;
2371 	int i;
2372 
2373 	if (!info)
2374 		return -EINVAL;
2375 
2376 	group = info->group;
2377 
2378 	/*
2379 	 * Ensure refs cannot change under any situation by taking the
2380 	 * register mutex during the final freeing of the references.
2381 	 */
2382 	mutex_lock(&group->reg_mutex);
2383 
2384 	refs = info->refs;
2385 
2386 	if (!refs)
2387 		goto out;
2388 
2389 	/*
2390 	 * The lifetime of refs has reached an end; it's tied to this file.
2391 	 * The underlying user_events are ref counted and cannot be freed
2392 	 * while we hold them. After this decrement, they may be freed elsewhere.
2393 	 */
2394 	for (i = 0; i < refs->count; ++i) {
2395 		user = refs->events[i];
2396 
2397 		if (user)
2398 			refcount_dec(&user->refcnt);
2399 	}
2400 out:
2401 	file->private_data = NULL;
2402 
2403 	mutex_unlock(&group->reg_mutex);
2404 
2405 	kfree(refs);
2406 	kfree(info);
2407 
2408 	return 0;
2409 }
2410 
2411 static const struct file_operations user_data_fops = {
2412 	.open		= user_events_open,
2413 	.write		= user_events_write,
2414 	.write_iter	= user_events_write_iter,
2415 	.unlocked_ioctl	= user_events_ioctl,
2416 	.release	= user_events_release,
2417 };
2418 
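/*
 * The whole status report is produced by a single show() call, so start()
 * yields one token only at position zero and next() always ends the
 * iteration.
 */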
2419 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2420 {
2421 	if (*pos)
2422 		return NULL;
2423 
2424 	return (void *)1;
2425 }
2426 
2427 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2428 {
2429 	++*pos;
2430 	return NULL;
2431 }
2432 
2433 static void user_seq_stop(struct seq_file *m, void *p)
2434 {
2435 }
2436 
2437 static int user_seq_show(struct seq_file *m, void *p)
2438 {
2439 	struct user_event_group *group = m->private;
2440 	struct user_event *user;
2441 	char status;
2442 	int i, active = 0, busy = 0;
2443 
2444 	if (!group)
2445 		return -EINVAL;
2446 
2447 	mutex_lock(&group->reg_mutex);
2448 
2449 	hash_for_each(group->register_table, i, user, node) {
2450 		status = user->status;
2451 
2452 		seq_printf(m, "%s", EVENT_NAME(user));
2453 
2454 		if (status != 0) {
2455 			seq_puts(m, " # Used by");
2456 
2457 			if (status & EVENT_STATUS_FTRACE)
2458 				seq_puts(m, " ftrace");
2459 			if (status & EVENT_STATUS_PERF)
2460 				seq_puts(m, " perf");
2461 			if (status & EVENT_STATUS_OTHER)
2462 				seq_puts(m, " other");
2463 
2464 			busy++;
2465 		}
2467 
2468 		seq_puts(m, "\n");
2469 		active++;
2470 	}
2471 
2472 	mutex_unlock(&group->reg_mutex);
2473 
2474 	seq_puts(m, "\n");
2475 	seq_printf(m, "Active: %d\n", active);
2476 	seq_printf(m, "Busy: %d\n", busy);
2477 
2478 	return 0;
2479 }
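
/*
 * Illustrative sketch, not part of this file: with one registered event
 * that ftrace has attached to, the status file reads roughly:
 *
 *	test_event # Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 *
 * "test_event" is a placeholder name; the set of consumers shown depends
 * on which probes are attached.
 */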
2480 
2481 static const struct seq_operations user_seq_ops = {
2482 	.start	= user_seq_start,
2483 	.next	= user_seq_next,
2484 	.stop	= user_seq_stop,
2485 	.show	= user_seq_show,
2486 };
2487 
2488 static int user_status_open(struct inode *node, struct file *file)
2489 {
2490 	struct user_event_group *group;
2491 	int ret;
2492 
2493 	group = current_user_event_group();
2494 
2495 	if (!group)
2496 		return -ENOENT;
2497 
2498 	ret = seq_open(file, &user_seq_ops);
2499 
2500 	if (!ret) {
2501 		/* Chain group to seq_file */
2502 		struct seq_file *m = file->private_data;
2503 
2504 		m->private = group;
2505 	}
2506 
2507 	return ret;
2508 }
2509 
2510 static const struct file_operations user_status_fops = {
2511 	.open		= user_status_open,
2512 	.read		= seq_read,
2513 	.llseek		= seq_lseek,
2514 	.release	= seq_release,
2515 };
2516 
2517 /*
2518  * Creates a set of tracefs files to allow user mode interactions.
2519  */
2520 static int create_user_tracefs(void)
2521 {
2522 	struct dentry *edata, *emmap;
2523 
2524 	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2525 				    NULL, NULL, &user_data_fops);
2526 
2527 	if (!edata) {
2528 		pr_warn("Could not create tracefs 'user_events_data' entry\n");
2529 		goto err;
2530 	}
2531 
2532 	emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2533 				    NULL, NULL, &user_status_fops);
2534 
2535 	if (!emmap) {
2536 		tracefs_remove(edata);
2537 		pr_warn("Could not create tracefs 'user_events_status' entry\n");
2538 		goto err;
2539 	}
2540 
2541 	return 0;
2542 err:
2543 	return -ENODEV;
2544 }
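
/*
 * Both files are created at the tracefs root, so with the usual mount they
 * appear as /sys/kernel/tracing/user_events_data and
 * /sys/kernel/tracing/user_events_status.
 */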
2545 
2546 static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2547 				      void *buffer, size_t *lenp, loff_t *ppos)
2548 {
2549 	int ret;
2550 
2551 	mutex_lock(&event_mutex);
2552 
2553 	ret = proc_douintvec(table, write, buffer, lenp, ppos);
2554 
2555 	mutex_unlock(&event_mutex);
2556 
2557 	return ret;
2558 }
2559 
2560 static struct ctl_table user_event_sysctls[] = {
2561 	{
2562 		.procname	= "user_events_max",
2563 		.data		= &max_user_events,
2564 		.maxlen		= sizeof(unsigned int),
2565 		.mode		= 0644,
2566 		.proc_handler	= set_max_user_events_sysctl,
2567 	},
2568 	{}
2569 };
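
/*
 * This exposes the limit as /proc/sys/kernel/user_events_max; for example,
 * `sysctl kernel.user_events_max` reads it, and writing a new value raises
 * or lowers the cap for subsequent registrations.
 */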
2570 
2571 static int __init trace_events_user_init(void)
2572 {
2573 	int ret;
2574 
2575 	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2576 
2577 	if (!fault_cache)
2578 		return -ENOMEM;
2579 
2580 	init_group = user_event_group_create(&init_user_ns);
2581 
2582 	if (!init_group) {
2583 		kmem_cache_destroy(fault_cache);
2584 		return -ENOMEM;
2585 	}
2586 
2587 	ret = create_user_tracefs();
2588 
2589 	if (ret) {
2590 		pr_warn("user_events could not register with tracefs\n");
2591 		user_event_group_destroy(init_group);
2592 		kmem_cache_destroy(fault_cache);
2593 		init_group = NULL;
2594 		return ret;
2595 	}
2596 
2597 	if (dyn_event_register(&user_event_dops))
2598 		pr_warn("user_events could not register with dyn_events\n");
2599 
2600 	register_sysctl_init("kernel", user_event_sysctls);
2601 
2602 	return 0;
2603 }
2604 
2605 fs_initcall(trace_events_user_init);
2606