// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
	char		*system_name;
	struct		hlist_node node;
	struct		mutex reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. These
 * cannot go away until the refcnt reaches one.
 */
struct user_event {
	struct user_event_group		*group;
	struct tracepoint		tracepoint;
	struct trace_event_call		call;
	struct trace_event_class	class;
	struct dyn_event		devent;
	struct hlist_node		node;
	struct list_head		fields;
	struct list_head		validators;
	struct work_struct		put_work;
	refcount_t			refcnt;
	int				min_size;
	int				reg_flags;
	char				status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
	struct list_head	mm_enablers_link;
	struct user_event	*event;
	unsigned long		addr;

	/* Track enable bit, flags, etc. Aligned for bitops. */
	unsigned long		values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Bit 8 is for marking 32-bit on 64-bit */
#define ENABLE_VAL_32_ON_64_BIT 8

#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)

/* Only duplicate the bit and compat values */
#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
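
/*
 * For illustration, a hypothetical enabler for enable bit 5 of a 32-bit
 * value on a 64-bit kernel would carry:
 *
 *	enabler->values = 5 | (1 << ENABLE_VAL_32_ON_64_BIT);
 *	ENABLE_BIT(enabler) == 5;
 *
 * The faulting/freeing bits are runtime state and are intentionally not
 * part of ENABLE_VAL_DUP_MASK, so they are never copied on fork.
 */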
129 
130 /* Used for asynchronous faulting in of pages */
131 struct user_event_enabler_fault {
132 	struct work_struct		work;
133 	struct user_event_mm		*mm;
134 	struct user_event_enabler	*enabler;
135 	int				attempt;
136 };
137 
138 static struct kmem_cache *fault_cache;
139 
140 /* Global list of memory descriptors using user_events */
141 static LIST_HEAD(user_event_mms);
142 static DEFINE_SPINLOCK(user_event_mms_lock);
143 
144 /*
145  * Stores per-file events references, as users register events
146  * within a file this structure is modified and freed via RCU.
147  * The lifetime of this struct is tied to the lifetime of the file.
148  * These are not shared and only accessible by the file that created it.
149  */
150 struct user_event_refs {
151 	struct rcu_head		rcu;
152 	int			count;
153 	struct user_event	*events[];
154 };
155 
156 struct user_event_file_info {
157 	struct user_event_group	*group;
158 	struct user_event_refs	*refs;
159 };
160 
161 #define VALIDATOR_ENSURE_NULL (1 << 0)
162 #define VALIDATOR_REL (1 << 1)
163 
164 struct user_event_validator {
165 	struct list_head	user_event_link;
166 	int			offset;
167 	int			flags;
168 };
169 
align_addr_bit(unsigned long * addr,int * bit,unsigned long * flags)170 static inline void align_addr_bit(unsigned long *addr, int *bit,
171 				  unsigned long *flags)
172 {
173 	if (IS_ALIGNED(*addr, sizeof(long))) {
174 #ifdef __BIG_ENDIAN
175 		/* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
176 		if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
177 			*bit += 32;
178 #endif
179 		return;
180 	}
181 
182 	*addr = ALIGN_DOWN(*addr, sizeof(long));
183 
184 	/*
185 	 * We only support 32 and 64 bit values. The only time we need
186 	 * to align is a 32 bit value on a 64 bit kernel, which on LE
187 	 * is always 32 bits, and on BE requires no change when unaligned.
188 	 */
189 #ifdef __LITTLE_ENDIAN
190 	*bit += 32;
191 #endif
192 }
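
/*
 * Example (illustrative): a 32-bit enable value at unaligned address
 * 0x1004 with enable bit 1 on a 64-bit little-endian kernel becomes
 * addr == 0x1000 and bit == 33, i.e. the same bit viewed from the
 * start of the aligned long.
 */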

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);
static int destroy_user_event(struct user_event *user);
static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static bool user_event_capable(u16 reg_flags)
{
	/* Persistent events require CAP_PERFMON / CAP_SYS_ADMIN */
	if (reg_flags & USER_EVENT_REG_PERSIST) {
		if (!perfmon_capable())
			return false;
	}

	return true;
}

static struct user_event *user_event_get(struct user_event *user)
{
	refcount_inc(&user->refcnt);

	return user;
}

static void delayed_destroy_user_event(struct work_struct *work)
{
	struct user_event *user = container_of(
		work, struct user_event, put_work);

	mutex_lock(&event_mutex);

	if (!refcount_dec_and_test(&user->refcnt))
		goto out;

	if (destroy_user_event(user)) {
		/*
		 * The only reason this would fail here is if we cannot
		 * update the visibility of the event. In this case the
		 * event stays in the hashtable, waiting for someone to
		 * attempt to delete it later.
		 */
		pr_warn("user_events: Unable to delete event\n");
		refcount_set(&user->refcnt, 1);
	}
out:
	mutex_unlock(&event_mutex);
}

static void user_event_put(struct user_event *user, bool locked)
{
	bool delete;

	if (unlikely(!user))
		return;

	/*
	 * When the event is not enabled for auto-delete there will always
	 * be at least 1 reference to the event. During the event creation
	 * we initially set the refcnt to 2 to achieve this. In those cases
	 * the caller must acquire event_mutex and after decrement check if
	 * the refcnt is 1, meaning this is the last reference. When auto
	 * delete is enabled, there will only be 1 ref, i.e. the refcnt will
	 * only be set to 1 during creation to allow the below checks to go
	 * through upon the last put. The last put must always be done with
	 * the event mutex held.
	 */
	if (!locked) {
		lockdep_assert_not_held(&event_mutex);
		delete = refcount_dec_and_mutex_lock(&user->refcnt, &event_mutex);
	} else {
		lockdep_assert_held(&event_mutex);
		delete = refcount_dec_and_test(&user->refcnt);
	}

	if (!delete)
		return;

	/*
	 * We now have the event_mutex in all cases, which ensures that
	 * no new references will be taken until event_mutex is released.
	 * New references come through find_user_event(), which requires
	 * the event_mutex to be held.
	 */

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* We should not get here when persist flag is set */
		pr_alert("BUG: Auto-delete engaged on persistent event\n");
		goto out;
	}

	/*
	 * Unfortunately we have to attempt the actual destroy in a work
	 * queue. This is because not all cases handle a trace_event_call
	 * being removed within the class->reg() operation for unregister.
	 */
	INIT_WORK(&user->put_work, delayed_destroy_user_event);

	/*
	 * Since the event is still in the hashtable, we have to re-inc
	 * the ref count to 1. This count will be decremented and checked
	 * in the work queue to ensure it's still the last ref. This is
	 * needed because a user-process could register the same event in
	 * between the time of event_mutex release and the work queue
	 * running the delayed destroy. If we removed the item now from
	 * the hashtable, this would result in a timing window where a
	 * user process would fail a register because the trace_event_call
	 * register would fail in the tracing layers.
	 */
	refcount_set(&user->refcnt, 1);

	if (WARN_ON_ONCE(!schedule_work(&user->put_work))) {
		/*
		 * If we fail we must wait for an admin to attempt delete or
		 * another register/close of the event, whichever is first.
		 */
		pr_warn("user_events: Unable to queue delayed destroy\n");
	}
out:
	/* If we didn't hold event_mutex on entry, unlock it now */
	if (!locked)
		mutex_unlock(&event_mutex);
}

static void user_event_group_destroy(struct user_event_group *group)
{
	kfree(group->system_name);
	kfree(group);
}

static char *user_event_group_system_name(void)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static struct user_event_group *current_user_event_group(void)
{
	return init_group;
}

static struct user_event_group *user_event_group_create(void)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name();

	if (!group->system_name)
		goto error;

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
};

static void user_event_enabler_destroy(struct user_event_enabler *enabler,
				       bool locked)
{
	list_del_rcu(&enabler->mm_enablers_link);

	/* No longer tracking the event via the enabler */
	user_event_put(enabler->event, locked);

	kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
				  int attempt)
{
	bool unlocked;
	int ret;

	/*
	 * Normally this is low, ensure that it cannot be taken advantage of by
	 * bad user processes to cause excessive looping.
	 */
	if (attempt > 10)
		return -EFAULT;

	mmap_read_lock(mm->mm);

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
			       &unlocked);
out:
	mmap_read_unlock(mm->mm);

	return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt);

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
	struct user_event_enabler_fault *fault = container_of(
		work, struct user_event_enabler_fault, work);
	struct user_event_enabler *enabler = fault->enabler;
	struct user_event_mm *mm = fault->mm;
	unsigned long uaddr = enabler->addr;
	int attempt = fault->attempt;
	int ret;

	ret = user_event_mm_fault_in(mm, uaddr, attempt);

	if (ret && ret != -ENOENT) {
		struct user_event *user = enabler->event;

		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
	}

	/* Prevent state changes from racing */
	mutex_lock(&event_mutex);

	/* User asked for enabler to be removed during fault */
	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
		user_event_enabler_destroy(enabler, true);
		goto out;
	}

	/*
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
	 */
	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!ret) {
		mmap_read_lock(mm->mm);
		user_event_enabler_write(mm, enabler, true, &attempt);
		mmap_read_unlock(mm->mm);
	}
out:
	mutex_unlock(&event_mutex);

	/* In all cases we no longer need the mm or fault */
	user_event_mm_put(mm);
	kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
					   struct user_event_enabler *enabler,
					   int attempt)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

	if (!fault)
		return false;

	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
	fault->mm = user_event_mm_get(mm);
	fault->enabler = enabler;
	fault->attempt = attempt;

	/* Don't try to queue in again while we have a pending fault */
	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!schedule_work(&fault->work)) {
		/* Allow another attempt later */
		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

		user_event_mm_put(mm);
		kmem_cache_free(fault_cache, fault);

		return false;
	}

	return true;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt)
{
	unsigned long uaddr = enabler->addr;
	unsigned long *ptr;
	struct page *page;
	void *kaddr;
	int bit = ENABLE_BIT(enabler);
	int ret;

	lockdep_assert_held(&event_mutex);
	mmap_assert_locked(mm->mm);

	*attempt += 1;

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0)
		return -ENOENT;

	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
		return -EBUSY;

	align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));

	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
				    &page, NULL);

	if (unlikely(ret <= 0)) {
		if (!fixup_fault)
			return -EFAULT;

		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
			pr_warn("user_events: Unable to queue fault handler\n");

		return -EFAULT;
	}

	kaddr = kmap_local_page(page);
	ptr = kaddr + (uaddr & ~PAGE_MASK);

	/* Update bit atomically, user tracers must be atomic as well */
	if (enabler->event && enabler->event->status)
		set_bit(bit, ptr);
	else
		clear_bit(bit, ptr);

	kunmap_local(kaddr);
	unpin_user_pages_dirty_lock(&page, 1, true);

	return 0;
}

static bool user_event_enabler_exists(struct user_event_mm *mm,
				      unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler *enabler;

	list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
			return true;
	}

	return false;
}

static void user_event_enabler_update(struct user_event *user)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *next;
	struct user_event_mm *mm;
	int attempt;

	lockdep_assert_held(&event_mutex);

	/*
	 * We need to build a one-shot list of all the mms that have an
	 * enabler for the user_event passed in. This list is only valid
	 * while holding the event_mutex. The only reason for this is that
	 * the global mm list is RCU protected while we use methods which
	 * can wait (mmap_read_lock and pin_user_pages_remote).
	 *
	 * NOTE: user_event_mm_get_all() increments the ref count of each
	 * mm that is added to the list to prevent removal timing windows.
	 * We must always put each mm after they are used, which may wait.
	 */
	mm = user_event_mm_get_all(user);

	while (mm) {
		next = mm->next;
		mmap_read_lock(mm->mm);

		list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				attempt = 0;
				user_event_enabler_write(mm, enabler, true, &attempt);
			}
		}

		mmap_read_unlock(mm->mm);
		user_event_mm_put(mm);
		mm = next;
	}
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
				   struct user_event_mm *mm)
{
	struct user_event_enabler *enabler;

	/* Skip pending frees */
	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
		return true;

	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

	if (!enabler)
		return false;

	enabler->event = user_event_get(orig->event);
	enabler->addr = orig->addr;

	/* Only dup part of value (ignore future flags, etc) */
	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

	/* Enablers not exposed yet, RCU not required */
	list_add(&enabler->mm_enablers_link, &mm->enablers);

	return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
	refcount_inc(&mm->refcnt);

	return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
	struct user_event_mm *found = NULL;
	struct user_event_enabler *enabler;
	struct user_event_mm *mm;

	/*
	 * We use the mm->next field to build a one-shot list from the global
	 * RCU protected list. To build this list the event_mutex must be held.
	 * This lets us build a list without requiring allocs that could fail
	 * when user based events are most wanted for diagnostics.
	 */
	lockdep_assert_held(&event_mutex);

	/*
	 * We do not want to block fork/exec while enablements are being
	 * updated, so we use RCU to walk the current tasks that have used
	 * user_events ABI for 1 or more events. Each enabler found in each
	 * task that matches the event being updated has a write to reflect
	 * the kernel state back into the process. Waits/faults must not occur
	 * during this. So we scan the list under RCU for all the mms that have
	 * the event within them. This is needed because mmap_read_lock() can
	 * wait. Each user mm returned has a ref inc to handle remove RCU races.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
		list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
			if (enabler->event == user) {
				mm->next = found;
				found = user_event_mm_get(mm);
				break;
			}
		}
	}

	rcu_read_unlock();

	return found;
}

static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
	struct user_event_mm *user_mm;

	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

	if (!user_mm)
		return NULL;

	user_mm->mm = t->mm;
	INIT_LIST_HEAD(&user_mm->enablers);
	refcount_set(&user_mm->refcnt, 1);
	refcount_set(&user_mm->tasks, 1);

	/*
	 * The lifetime of the memory descriptor can slightly outlast
	 * the task lifetime if a ref to the user_event_mm is taken
	 * between list_del_rcu() and call_rcu(). Therefore we need
	 * to take a reference to it to ensure it can live this long
	 * under this corner case. This can also occur in clones that
	 * outlast the parent.
	 */
	mmgrab(user_mm->mm);

	return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_add_rcu(&user_mm->mms_link, &user_event_mms);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
	struct user_event_mm *user_mm = current->user_event_mm;

	if (user_mm)
		goto inc;

	user_mm = user_event_mm_alloc(current);

	if (!user_mm)
		goto error;

	user_event_mm_attach(user_mm, current);
inc:
	refcount_inc(&user_mm->refcnt);
error:
	return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
	struct user_event_enabler *enabler, *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
		user_event_enabler_destroy(enabler, false);

	mmdrop(mm->mm);
	kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt))
		user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
	struct user_event_mm *mm;

	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
	user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
	struct user_event_mm *mm;
	unsigned long flags;

	might_sleep();

	mm = t->user_event_mm;
	t->user_event_mm = NULL;

	/* Clone will increment the tasks, only remove if last clone */
	if (!refcount_dec_and_test(&mm->tasks))
		return;

	/* Remove the mm from the list, so it can no longer be enabled */
	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_del_rcu(&mm->mms_link);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	/*
	 * We need to wait for currently occurring writes to stop within
	 * the mm. This is required since exit_mm() snaps the current rss
	 * stats and clears them. On the final mmdrop(), check_mm() will
	 * report a bug if these increment.
	 *
	 * All writes/pins are done under mmap_read lock, take the write
	 * lock to ensure in-progress faults have completed. Faults that
	 * are pending but yet to run will check the task count and skip
	 * the fault since the mm is going away.
	 */
	mmap_write_lock(mm->mm);
	mmap_write_unlock(mm->mm);

	/*
	 * Put for mm must be done after RCU delay to handle new refs in
	 * between the list_del_rcu() and now. This ensures any get refs
	 * during rcu_read_lock() are accounted for during list removal.
	 *
	 * CPU A			|	CPU B
	 * ---------------------------------------------------------------
	 * user_event_mm_remove()	|	rcu_read_lock();
	 * list_del_rcu()		|	list_for_each_entry_rcu();
	 * call_rcu()			|	refcount_inc();
	 * .				|	rcu_read_unlock();
	 * schedule_work()		|	.
	 * user_event_mm_put()		|	.
	 *
	 * mmdrop() cannot be called in the softirq context of call_rcu()
	 * so we use a work queue after call_rcu() to run within.
	 */
	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
	queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
	struct user_event_mm *mm = user_event_mm_alloc(t);
	struct user_event_enabler *enabler;

	if (!mm)
		return;

	rcu_read_lock();

	list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
		if (!user_event_enabler_dup(enabler, mm))
			goto error;
	}

	rcu_read_unlock();

	user_event_mm_attach(mm, t);
	return;
error:
	rcu_read_unlock();
	user_event_mm_destroy(mm);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
					      unsigned char bit)
{
	struct user_event_mm *user_mm = current_user_event_mm();
	bool exists;

	if (!user_mm)
		return false;

	exists = user_event_enabler_exists(user_mm, uaddr, bit);

	user_event_mm_put(user_mm);

	return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
			   int *write_result)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *user_mm;
	unsigned long uaddr = (unsigned long)reg->enable_addr;
	int attempt = 0;

	user_mm = current_user_event_mm();

	if (!user_mm)
		return NULL;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

	if (!enabler)
		goto out;

	enabler->event = user;
	enabler->addr = uaddr;
	enabler->values = reg->enable_bit;

#if BITS_PER_LONG >= 64
	if (reg->enable_size == 4)
		set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
#endif

retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Attempt to reflect the current state within the process */
	mmap_read_lock(user_mm->mm);
	*write_result = user_event_enabler_write(user_mm, enabler, false,
						 &attempt);
	mmap_read_unlock(user_mm->mm);

	/*
	 * If the write works, then we will track the enabler. A ref to the
	 * underlying user_event is held by the enabler to prevent it going
	 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
	 * be forcefully deleted from the system until all tasks using it
	 * exit or run exec(), which includes forks and clones.
	 */
	if (!*write_result) {
		user_event_get(user);
		list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
	}

	mutex_unlock(&event_mutex);

	if (*write_result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;

		kfree(enabler);
		enabler = NULL;
	}
out:
	user_event_mm_put(user_mm);

	return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	int last = 0;

	if (user->reg_flags & USER_EVENT_REG_PERSIST)
		last = 1;

	return refcount_read(&user->refcnt) == last;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective; they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser,
				int reg_flags)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser, reg_flags);
}
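
/*
 * For example, the raw command "test:FLAG u32 count" (hypothetical name
 * and flag for clarity) is split in place into name "test", flags "FLAG"
 * and args "u32 count" before being handed to user_event_parse().
 */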

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}
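
/*
 * Example (illustrative): user_field_array_size("char[20]") copies "20]"
 * into val, truncates at the ']' and returns 20. Sizes above
 * MAX_FIELD_ARRAY_SIZE (1024) or non-numeric values are rejected with
 * -EINVAL.
 */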

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, user_event_link) {
		list_del(&validator->user_event_link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->user_event_link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);

	list_add(&field->link, &user->fields);

	/*
	 * Min size required from user writes; this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}
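
/*
 * Example (illustrative): for "char[20] msg; unsigned int id" the fields
 * land at user data offsets 0 and 20, so after the last field is added
 * user->min_size is 24 bytes, the minimum payload a write must supply.
 */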

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}
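
/*
 * Example (illustrative, hypothetical names): "struct my_struct var 8"
 * splits into type "struct my_struct" and name "var" with an explicit
 * size of 8, since user_field_size() cannot size unknown structs.
 */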

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown, likely a struct; allowed, treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	if (len != 0)
		*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (str_has_prefix(field->type, "struct "))
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_reverse(field, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO
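
/*
 * Example (illustrative): for an event registered as
 * "test char[20] msg;unsigned int id" the resulting format is:
 *
 *	print_fmt = "\"msg=%s id=%u\", REC->msg, REC->id";
 *
 * A "__data_loc char[] msg" field would emit __get_str(msg) instead.
 */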

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	if (current_user_events > 0)
		current_user_events--;
	else
		pr_alert("BUG: Bad current_user_events\n");

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, int argc, const char **argv,
					  u32 flags, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key) {
		if (strcmp(EVENT_NAME(user), name))
			continue;

		if (user_fields_match(user, argc, argv))
			return user_event_get(user);

		return ERR_PTR(-EADDRINUSE);
	}

	return NULL;
}

static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, user_event_link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}
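
/*
 * Example (illustrative): a dynamic string payload 8 bytes into the user
 * data with length 5 is described by loc = (5 << 16) | 8. With
 * VALIDATOR_REL the offset is taken relative to just past the loc word
 * itself; either way the data must end within the buffer and, for char
 * data, be NUL terminated.
 */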

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(i->count != 0 && !copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(i->count != 0 && !copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif

/*
 * Update the enabled bit among all user processes.
 */
static void update_enable_bit_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	user->status = status;

	user_event_enabler_update(user);
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	user_event_get(user);
	update_enable_bit_for(user);
	return 0;
dec:
	update_enable_bit_for(user);
	user_event_put(user, true);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event_group *group;
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);

	if (!name)
		return -ENOMEM;

	group = current_user_event_group();

	if (!group) {
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&group->reg_mutex);

	/* Dyn events persist, otherwise they would clean up immediately */
	ret = user_event_parse_cmd(group, name, &user, USER_EVENT_REG_PERSIST);

	if (!ret)
		user_event_put(user, false);

	mutex_unlock(&group->reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}
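
/*
 * For illustration, a representative (hypothetical) invocation via the
 * dynamic_events tracefs file, assuming USER_EVENTS_PREFIX is "u:":
 *
 *	echo 'u:test u32 count' >> /sys/kernel/tracing/dynamic_events
 *
 * This creates a persistent event named "test", which requires
 * CAP_PERFMON or CAP_SYS_ADMIN per user_event_capable().
 */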

static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_reverse(field, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (!user_event_last_ref(user))
		return -EBUSY;

	if (!user_event_capable(user->reg_flags))
		return -EPERM;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name = NULL, *dyn_field_name = NULL;
	bool colon = false, match = false;
	int dyn_len, len;

	if (*iout >= argc)
		return false;

	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
					    0, &colon);

	len = user_field_set_string(field, field_name, 0, colon);

	if (dyn_len != len)
		return false;

	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
	field_name = kmalloc(len, GFP_KERNEL);

	if (!dyn_field_name || !field_name)
		goto out;

	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
				  dyn_len, &colon);

	user_field_set_string(field, field_name, len, colon);

	match = strcmp(dyn_field_name, field_name) == 0;
out:
	kfree(dyn_field_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field;
	struct list_head *head = &user->fields;
	int i = 0;

	if (argc == 0)
		return list_empty(head);

	list_for_each_entry_reverse(field, head, link) {
		if (!user_field_match(field, argc, argv, &i))
			return false;
	}

	if (i != argc)
		return false;

	return true;
}
1885 
user_event_match(const char * system,const char * event,int argc,const char ** argv,struct dyn_event * ev)1886 static bool user_event_match(const char *system, const char *event,
1887 			     int argc, const char **argv, struct dyn_event *ev)
1888 {
1889 	struct user_event *user = container_of(ev, struct user_event, devent);
1890 	bool match;
1891 
1892 	match = strcmp(EVENT_NAME(user), event) == 0 &&
1893 		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1894 
1895 	if (match)
1896 		match = user_fields_match(user, argc, argv);
1897 
1898 	return match;
1899 }
1900 
1901 static struct dyn_event_operations user_event_dops = {
1902 	.create = user_event_create,
1903 	.show = user_event_show,
1904 	.is_busy = user_event_is_busy,
1905 	.free = user_event_free,
1906 	.match = user_event_match,
1907 };
1908 
user_event_trace_register(struct user_event * user)1909 static int user_event_trace_register(struct user_event *user)
1910 {
1911 	int ret;
1912 
1913 	ret = register_trace_event(&user->call.event);
1914 
1915 	if (!ret)
1916 		return -ENODEV;
1917 
1918 	ret = user_event_set_call_visible(user, true);
1919 
1920 	if (ret)
1921 		unregister_trace_event(&user->call.event);
1922 
1923 	return ret;
1924 }

/*
 * Counts how many ';' without a trailing space are in the args.
 */
static int count_semis_no_space(char *args)
{
	int count = 0;

	while ((args = strchr(args, ';'))) {
		args++;

		if (!isspace(*args))
			count++;
	}

	return count;
}

/*
 * Copies the arguments while ensuring all ';' have a trailing space.
 */
static char *insert_space_after_semis(char *args, int count)
{
	char *fixed, *pos;
	int len;

	len = strlen(args) + count;
	fixed = kmalloc(len + 1, GFP_KERNEL);

	if (!fixed)
		return NULL;

	pos = fixed;

	/* Insert a space after ';' if there is no trailing space. */
	while (*args) {
		*pos = *args++;

		if (*pos++ == ';' && !isspace(*args))
			*pos++ = ' ';
	}

	*pos = '\0';

	return fixed;
}

static char **user_event_argv_split(char *args, int *argc)
{
	char **split;
	char *fixed;
	int count;

	/* Count how many ';' without a trailing space */
	count = count_semis_no_space(args);

	/* No fixup is required */
	if (!count)
		return argv_split(GFP_KERNEL, args, argc);

	/* We must fixup 'field;field' to 'field; field' */
	fixed = insert_space_after_semis(args, count);

	if (!fixed)
		return NULL;

	/* We do a normal split afterwards */
	split = argv_split(GFP_KERNEL, fixed, argc);

	/* We can free since argv_split makes a copy */
	kfree(fixed);

	return split;
}
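
/*
 * Example: "u32 a;u32 b" is first fixed up to "u32 a; u32 b", which
 * argv_split() then breaks into { "u32", "a;", "u32", "b" } with an
 * argc of 4 (input string is illustrative).
 */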

/*
 * Parses the event name, arguments and flags then registers if successful.
 * On success this method takes ownership of the name buffer; on failure
 * the caller must free it. Upon success the returned user_event has its
 * ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser, int reg_flags)
{
	struct user_event *user;
	char **argv = NULL;
	int argc = 0;
	int ret;
	u32 key;

	/* Currently don't support any text based flags */
	if (flags != NULL)
		return -EINVAL;

	if (!user_event_capable(reg_flags))
		return -EPERM;

	if (args) {
		argv = user_event_argv_split(args, &argc);

		if (!argv)
			return -ENOMEM;
	}

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(group, name, argc, (const char **)argv,
			       reg_flags, &key);
	mutex_unlock(&event_mutex);

	if (argv)
		argv_free(argv);

	if (IS_ERR(user))
		return PTR_ERR(user);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by caller, free it since it already exists.
		 * Caller only worries about failure cases for freeing.
		 */
		kfree(name);

		return 0;
	}

	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);
	INIT_LIST_HEAD(&user->validators);

	user->group = group;
	user->tracepoint.name = name;

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;
	user->class.system = group->system_name;

	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
	user->class.perf_probe = user_event_perf;
#endif

	mutex_lock(&event_mutex);

	if (current_user_events >= max_user_events) {
		ret = -EMFILE;
		goto put_user_lock;
	}

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	user->reg_flags = reg_flags;

	if (user->reg_flags & USER_EVENT_REG_PERSIST) {
		/* Ensure we track self ref and caller ref (2) */
		refcount_set(&user->refcnt, 2);
	} else {
		/* Ensure we track only caller ref (1) */
		refcount_set(&user->refcnt, 1);
	}

	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	hash_add(group->register_table, &user->node, key);
	current_user_events++;

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;
put_user_lock:
	mutex_unlock(&event_mutex);
put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(user);
	return ret;
}

/*
 * Deletes previously created events if they are no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
	struct user_event *user;
	struct hlist_node *tmp;
	u32 key = user_event_key(name);
	int ret = -ENOENT;

	/* Attempt to delete all event(s) with the name passed in */
	hash_for_each_possible_safe(group->register_table, user, tmp, node, key) {
		if (strcmp(EVENT_NAME(user), name))
			continue;

		if (!user_event_last_ref(user))
			return -EBUSY;

		if (!user_event_capable(user->reg_flags))
			return -EPERM;

		ret = destroy_user_event(user);

		if (ret)
			goto out;
	}
out:
	return ret;
}

/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	if (idx < 0)
		return -EINVAL;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(info->refs);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is open.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled gets disabled after this check, however
	 * we don't mind if a few events slip through in that window.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	} else {
		return -EBADF;
	}

	return ret;
}
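
/*
 * A minimal sketch of the payload consumed above: each write() begins
 * with the u32 write_index returned from DIAG_IOCSREG, followed by data
 * matching the registered fields. Assuming an event registered as
 * "mytest u64 count" (names below are illustrative; see the DIAG_IOCSREG
 * sketch further down):
 *
 *   struct {
 *           __u32 write_index;
 *           __u64 count;
 *   } payload = { write_index, 42 };
 *
 *   if (enabled & (1 << 0))
 *           write(fd, &payload, sizeof(payload));
 */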

static int user_events_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	struct user_event_file_info *info;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);

	if (!info)
		return -ENOMEM;

	info->group = group;

	file->private_data = info;

	return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
					 count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{
	struct user_event_group *group = info->group;
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(info->refs,
					 lockdep_is_held(&group->reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user_event_get(user);

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_reg, write_index))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	if (ret)
		return ret;

	/* Ensure only valid flags */
	if (kreg->flags & ~(USER_EVENT_REG_MAX-1))
		return -EINVAL;

	/* Ensure supported size */
	switch (kreg->enable_size) {
	case 4:
		/* 32-bit */
		break;
#if BITS_PER_LONG >= 64
	case 8:
		/* 64-bit */
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Ensure natural alignment */
	if (kreg->enable_addr % kreg->enable_size)
		return -EINVAL;

	/* Ensure bit range for size */
	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
		return -EINVAL;

	/* Ensure accessible */
	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
		       kreg->enable_size))
		return -EFAULT;

	kreg->size = size;

	return 0;
}

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
				  unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	struct user_event_enabler *enabler;
	char *name;
	long ret;
	int write_result;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	/*
	 * Prevent users from using the same address and bit multiple times
	 * within the same mm address space. This can cause unexpected behavior
	 * for user processes that is far easier to debug if this is explicitly
	 * an error upon registering.
	 */
	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
					      reg.enable_bit))
		return -EADDRINUSE;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	ret = user_event_parse_cmd(info->group, name, &user, reg.flags);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(info, user);

	/* No longer need parse ref, ref_add either worked or not */
	user_event_put(user, false);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	/*
	 * user_events_ref_add succeeded:
	 * At this point we have a user_event, its lifetime is bound by the
	 * reference count, not this file. If anything fails, the user_event
	 * still has a reference until the file is released. During release
	 * any remaining references (from user_events_ref_add) are decremented.
	 *
	 * Attempt to create an enabler, which likewise has its lifetime tied
	 * to the event. Once the task that caused the enabler to be created
	 * exits or issues exec() then the enablers it created will be
	 * destroyed and the ref to the event will be decremented.
	 */
	enabler = user_event_enabler_create(&reg, user, &write_result);

	if (!enabler)
		return -ENOMEM;

	/* Write failed/faulted, give error back to caller */
	if (write_result)
		return write_result;

	put_user((u32)ret, &ureg->write_index);

	return 0;
}
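
/*
 * A minimal user-space sketch of the registration handled above, assuming
 * the struct user_reg layout and DIAG_IOCSREG from <linux/user_events.h>
 * and the usual tracefs mount point (event name/fields are illustrative):
 *
 *   #include <fcntl.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/user_events.h>
 *
 *   static __u32 enabled;  // Kernel sets/clears enable_bit at this address
 *
 *   int reg_example(__u32 *write_index)
 *   {
 *           struct user_reg reg;
 *           int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *           if (fd < 0)
 *                   return -1;
 *
 *           memset(&reg, 0, sizeof(reg));
 *           reg.size = sizeof(reg);
 *           reg.enable_bit = 0;
 *           reg.enable_size = sizeof(enabled);
 *           reg.enable_addr = (__u64)(uintptr_t)&enabled;
 *           reg.name_args = (__u64)(uintptr_t)"mytest u64 count";
 *
 *           if (ioctl(fd, DIAG_IOCSREG, &reg) < 0) {
 *                   close(fd);
 *                   return -1;
 *           }
 *
 *           *write_index = reg.write_index;  // Prefixes each write()
 *           return fd;
 *   }
 */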

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
				  unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(info->group, name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}

static long user_unreg_get(struct user_unreg __user *ureg,
			   struct user_unreg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_unreg, disable_addr))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	/* Ensure no reserved values, since we don't support any yet */
	if (kreg->__reserved || kreg->__reserved2)
		return -EINVAL;

	return ret;
}

static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
				   unsigned long uaddr, unsigned char bit,
				   unsigned long flags)
{
	struct user_event_enabler enabler;
	int result;
	int attempt = 0;

	memset(&enabler, 0, sizeof(enabler));
	enabler.addr = uaddr;
	enabler.values = bit | flags;
retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Force the bit to be cleared, since no event is attached */
	mmap_read_lock(user_mm->mm);
	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
	mmap_read_unlock(user_mm->mm);

	mutex_unlock(&event_mutex);

	if (result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;
	}

	return result;
}

/*
 * Unregisters an enablement address/bit within a task/user mm.
 */
static long user_events_ioctl_unreg(unsigned long uarg)
{
	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
	struct user_event_mm *mm = current->user_event_mm;
	struct user_event_enabler *enabler, *next;
	struct user_unreg reg;
	unsigned long flags;
	long ret;

	ret = user_unreg_get(ureg, &reg);

	if (ret)
		return ret;

	if (!mm)
		return -ENOENT;

	flags = 0;
	ret = -ENOENT;

	/*
	 * The freeing and faulting flags indicate whether the enabler is in
	 * use at all. When faulting is set, a page-fault is occurring
	 * asynchronously. During an async fault, if freeing is set, the
	 * enabler will be destroyed. If no async fault is happening, we can
	 * destroy it now since we hold the event_mutex during these checks.
	 */
	mutex_lock(&event_mutex);

	list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
		if (enabler->addr == reg.disable_addr &&
		    ENABLE_BIT(enabler) == reg.disable_bit) {
			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));

			/* We must keep compat flags for the clear */
			flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;

			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
				user_event_enabler_destroy(enabler, true);

			/* Removed at least one */
			ret = 0;
		}
	}

	mutex_unlock(&event_mutex);

	/* Ensure bit is now cleared for user, regardless of event status */
	if (!ret)
		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
					      reg.disable_bit, flags);

	return ret;
}
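
/*
 * A minimal user-space sketch of the unregister path above, assuming the
 * struct user_unreg layout and DIAG_IOCSUNREG from <linux/user_events.h>
 * ("fd" and "enabled" refer to the earlier registration sketch):
 *
 *   struct user_unreg unreg;
 *
 *   memset(&unreg, 0, sizeof(unreg));
 *   unreg.size = sizeof(unreg);
 *   unreg.disable_bit = 0;
 *   unreg.disable_addr = (__u64)(uintptr_t)&enabled;
 *
 *   ioctl(fd, DIAG_IOCSUNREG, &unreg);  // Bit 0 is cleared afterwards
 */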

/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group = info->group;
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_reg(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_del(info, uarg);
		mutex_unlock(&group->reg_mutex);
		break;

	case DIAG_IOCSUNREG:
		mutex_lock(&group->reg_mutex);
		ret = user_events_ioctl_unreg(uarg);
		mutex_unlock(&group->reg_mutex);
		break;
	}

	return ret;
}

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group;
	struct user_event_refs *refs;
	int i;

	if (!info)
		return -EINVAL;

	group = info->group;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&group->reg_mutex);

	refs = info->refs;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted and cannot be freed
	 * while we still hold a reference. After this decrement, the
	 * user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i)
		user_event_put(refs->events[i], false);

out:
	file->private_data = NULL;

	mutex_unlock(&group->reg_mutex);

	kfree(refs);
	kfree(info);

	return 0;
}

static const struct file_operations user_data_fops = {
	.open		= user_events_open,
	.write		= user_events_write,
	.write_iter	= user_events_write_iter,
	.unlocked_ioctl	= user_events_ioctl,
	.release	= user_events_release,
};

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event_group *group = m->private;
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0;

	if (!group)
		return -EINVAL;

	mutex_lock(&group->reg_mutex);

	hash_for_each(group->register_table, i, user, node) {
		status = user->status;

		seq_printf(m, "%s", EVENT_NAME(user));

		if (status != 0) {
			seq_puts(m, " # Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&group->reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);

	return 0;
}
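
/*
 * Example user_events_status output for a single event with an ftrace
 * probe attached (the event name is illustrative):
 *
 *   mytest # Used by ftrace
 *
 *   Active: 1
 *   Busy: 1
 */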

static const struct seq_operations user_seq_ops = {
	.start	= user_seq_start,
	.next	= user_seq_next,
	.stop	= user_seq_stop,
	.show	= user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	int ret;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	ret = seq_open(file, &user_seq_ops);

	if (!ret) {
		/* Chain group to seq_file */
		struct seq_file *m = file->private_data;

		m->private = group;
	}

	return ret;
}

static const struct file_operations user_status_fops = {
	.open		= user_status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static int set_max_user_events_sysctl(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = proc_douintvec(table, write, buffer, lenp, ppos);

	mutex_unlock(&event_mutex);

	return ret;
}

static struct ctl_table user_event_sysctls[] = {
	{
		.procname	= "user_events_max",
		.data		= &max_user_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= set_max_user_events_sysctl,
	},
	{}
};
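
/*
 * The limit above is exposed as /proc/sys/kernel/user_events_max, e.g.:
 *
 *   sysctl kernel.user_events_max=65536
 *
 * Writes are serialized against event creation via event_mutex.
 */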

static int __init trace_events_user_init(void)
{
	int ret;

	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);

	if (!fault_cache)
		return -ENOMEM;

	init_group = user_event_group_create();

	if (!init_group) {
		kmem_cache_destroy(fault_cache);
		return -ENOMEM;
	}

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		user_event_group_destroy(init_group);
		kmem_cache_destroy(fault_cache);
		init_group = NULL;
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	register_sysctl_init("kernel", user_event_sysctls);

	return 0;
}

fs_initcall(trace_events_user_init);