1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14 
15 #include "trace.h"
16 
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19 
/*
 * Free an event_trigger_data instance.  Any attached filter is torn
 * down first, then we wait for all in-flight tracepoint handlers to
 * finish before freeing, since they may still be dereferencing @data
 * under rcu_read_lock_sched().  The ordering here is load-bearing.
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}
30 
/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec, may be NULL
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* No record: invoke unconditionally */
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer until after the event is committed */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
84 EXPORT_SYMBOL_GPL(event_triggers_call);
85 
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		/* Only run the triggers that were deferred earlier */
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
112 
113 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
114 
115 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
116 {
117 	struct trace_event_file *event_file = event_file_data(m->private);
118 
119 	if (t == SHOW_AVAILABLE_TRIGGERS) {
120 		(*pos)++;
121 		return NULL;
122 	}
123 	return seq_list_next(t, &event_file->triggers, pos);
124 }
125 
/*
 * seq_file ->start for the trigger file.  Takes event_mutex for the
 * whole read; released in trigger_stop().  Returns the sentinel
 * SHOW_AVAILABLE_TRIGGERS when no triggers are set so ->show() can
 * print the list of available trigger commands instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
141 
/* seq_file ->stop: drops the event_mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
146 
147 static int trigger_show(struct seq_file *m, void *v)
148 {
149 	struct event_trigger_data *data;
150 	struct event_command *p;
151 
152 	if (v == SHOW_AVAILABLE_TRIGGERS) {
153 		seq_puts(m, "# Available triggers:\n");
154 		seq_putc(m, '#');
155 		mutex_lock(&trigger_cmd_mutex);
156 		list_for_each_entry_reverse(p, &trigger_commands, list)
157 			seq_printf(m, " %s", p->name);
158 		seq_putc(m, '\n');
159 		mutex_unlock(&trigger_cmd_mutex);
160 		return 0;
161 	}
162 
163 	data = list_entry(v, struct event_trigger_data, list);
164 	data->ops->print(m, data->ops, data);
165 
166 	return 0;
167 }
168 
169 static const struct seq_operations event_triggers_seq_ops = {
170 	.start = trigger_start,
171 	.next = trigger_next,
172 	.stop = trigger_stop,
173 	.show = trigger_show,
174 };
175 
/*
 * Open handler for the trigger file.  Under event_mutex: validates the
 * backing event still exists, clears all existing triggers on
 * O_TRUNC writes, and sets up the seq_file for reads.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	/* The event may have been removed while the file was open-able */
	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		/* O_TRUNC semantics: drop every registered trigger */
		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			/* Stash the struct file so start/next can re-resolve the event */
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
216 
217 int trigger_process_regex(struct trace_event_file *file, char *buff)
218 {
219 	char *command, *next = buff;
220 	struct event_command *p;
221 	int ret = -EINVAL;
222 
223 	command = strsep(&next, ": \t");
224 	command = (command[0] != '!') ? command : command + 1;
225 
226 	mutex_lock(&trigger_cmd_mutex);
227 	list_for_each_entry(p, &trigger_commands, list) {
228 		if (strcmp(p->name, command) == 0) {
229 			ret = p->func(p, file, buff, command, next);
230 			goto out_unlock;
231 		}
232 	}
233  out_unlock:
234 	mutex_unlock(&trigger_cmd_mutex);
235 
236 	return ret;
237 }
238 
239 static ssize_t event_trigger_regex_write(struct file *file,
240 					 const char __user *ubuf,
241 					 size_t cnt, loff_t *ppos)
242 {
243 	struct trace_event_file *event_file;
244 	ssize_t ret;
245 	char *buf;
246 
247 	if (!cnt)
248 		return 0;
249 
250 	if (cnt >= PAGE_SIZE)
251 		return -EINVAL;
252 
253 	buf = memdup_user_nul(ubuf, cnt);
254 	if (IS_ERR(buf))
255 		return PTR_ERR(buf);
256 
257 	strim(buf);
258 
259 	mutex_lock(&event_mutex);
260 	event_file = event_file_data(file);
261 	if (unlikely(!event_file)) {
262 		mutex_unlock(&event_mutex);
263 		kfree(buf);
264 		return -ENODEV;
265 	}
266 	ret = trigger_process_regex(event_file, buf);
267 	mutex_unlock(&event_mutex);
268 
269 	kfree(buf);
270 	if (ret < 0)
271 		goto out;
272 
273 	*ppos += cnt;
274 	ret = cnt;
275  out:
276 	return ret;
277 }
278 
/* Release handler: tear down the seq_file if the file was readable */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
290 
/* fops ->write thin wrapper around the common regex write path */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
297 
/* fops ->open thin wrapper around the common regex open path */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}
304 
/* fops ->release thin wrapper around the common regex release path */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
310 
/* File operations for each event's "trigger" tracefs file */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
318 
319 /*
320  * Currently we only register event commands from __init, so mark this
321  * __init too.
322  */
323 __init int register_event_command(struct event_command *cmd)
324 {
325 	struct event_command *p;
326 	int ret = 0;
327 
328 	mutex_lock(&trigger_cmd_mutex);
329 	list_for_each_entry(p, &trigger_commands, list) {
330 		if (strcmp(cmd->name, p->name) == 0) {
331 			ret = -EBUSY;
332 			goto out_unlock;
333 		}
334 	}
335 	list_add(&cmd->list, &trigger_commands);
336  out_unlock:
337 	mutex_unlock(&trigger_cmd_mutex);
338 
339 	return ret;
340 }
341 
342 /*
343  * Currently we only unregister event commands from __init, so mark
344  * this __init too.
345  */
346 __init int unregister_event_command(struct event_command *cmd)
347 {
348 	struct event_command *p, *n;
349 	int ret = -ENODEV;
350 
351 	mutex_lock(&trigger_cmd_mutex);
352 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
353 		if (strcmp(cmd->name, p->name) == 0) {
354 			ret = 0;
355 			list_del_init(&p->list);
356 			goto out_unlock;
357 		}
358 	}
359  out_unlock:
360 	mutex_unlock(&trigger_cmd_mutex);
361 
362 	return ret;
363 }
364 
/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: The trigger count, smuggled through a void pointer
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	/* A count of -1 means "fire every time" */
	if (count != -1)
		seq_printf(m, ":count=%ld", count);
	else
		seq_puts(m, ":unlimited");

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
399 
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	/* Non-atomic increment; callers serialize via event_mutex (see register_trigger()) */
	data->ref++;
	return 0;
}
418 
/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	/* Refcount underflow would mean a use-after-free elsewhere */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}
440 
441 int trace_event_trigger_enable_disable(struct trace_event_file *file,
442 				       int trigger_enable)
443 {
444 	int ret = 0;
445 
446 	if (trigger_enable) {
447 		if (atomic_inc_return(&file->tm_ref) > 1)
448 			return ret;
449 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
450 		ret = trace_event_enable_disable(file, 1, 1);
451 	} else {
452 		if (atomic_dec_return(&file->tm_ref) > 0)
453 			return ret;
454 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
455 		ret = trace_event_enable_disable(file, 0, 1);
456 	}
457 
458 	return ret;
459 }
460 
/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		/* _safe: entries are removed while iterating */
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}
489 
490 /**
491  * update_cond_flag - Set or reset the TRIGGER_COND bit
492  * @file: The trace_event_file associated with the event
493  *
494  * If an event has triggers and any of those triggers has a filter or
495  * a post_trigger, trigger invocation needs to be deferred until after
496  * the current event has logged its data, and the event should have
497  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
498  * cleared.
499  */
500 void update_cond_flag(struct trace_event_file *file)
501 {
502 	struct event_trigger_data *data;
503 	bool set_cond = false;
504 
505 	lockdep_assert_held(&event_mutex);
506 
507 	list_for_each_entry(data, &file->triggers, list) {
508 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
509 		    event_command_needs_rec(data->cmd_ops)) {
510 			set_cond = true;
511 			break;
512 		}
513 	}
514 
515 	if (set_cond)
516 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
517 	else
518 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
519 }
520 
/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * negative errno otherwise.  Callers treat > 0 as success.
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Only one trigger of a given type per event */
	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;	/* ret now counts the registered trigger */

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Roll back the registration on enable failure */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
569 
/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	/* Match by trigger type; at most one per event can be registered */
	list_for_each_entry(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'data' is only valid here when 'unregistered' is set (loop broke early) */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
604 
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	/* -1 means "unlimited": fire on every matching event */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	/* A leading '!' means remove the trigger instead of adding it */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
709 
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	/* The filter string must begin with the literal token "if" */
	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	/* Publish the new filter before tearing down the old one */
	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		/* Keep a copy of the original string for printing the trigger */
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
778 
779 static LIST_HEAD(named_triggers);
780 
781 /**
782  * find_named_trigger - Find the common named trigger associated with @name
783  * @name: The name of the set of named triggers to find the common data for
784  *
785  * Named triggers are sets of triggers that share a common set of
786  * trigger data.  The first named trigger registered with a given name
787  * owns the common trigger data that the others subsequently
788  * registered with the same name will reference.  This function
789  * returns the common trigger data associated with that first
790  * registered instance.
791  *
792  * Return: the common trigger data for the given named trigger on
793  * success, NULL otherwise.
794  */
795 struct event_trigger_data *find_named_trigger(const char *name)
796 {
797 	struct event_trigger_data *data;
798 
799 	if (!name)
800 		return NULL;
801 
802 	list_for_each_entry(data, &named_triggers, named_list) {
803 		if (data->named_data)
804 			continue;
805 		if (strcmp(data->name, name) == 0)
806 			return data;
807 	}
808 
809 	return NULL;
810 }
811 
812 /**
813  * is_named_trigger - determine if a given trigger is a named trigger
814  * @test: The trigger data to test
815  *
816  * Return: true if 'test' is a named trigger, false otherwise.
817  */
818 bool is_named_trigger(struct event_trigger_data *test)
819 {
820 	struct event_trigger_data *data;
821 
822 	list_for_each_entry(data, &named_triggers, named_list) {
823 		if (test == data)
824 			return true;
825 	}
826 
827 	return false;
828 }
829 
/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	/* data->name owns its own copy of the name; freed in del_named_trigger() */
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}
847 
/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}
859 
860 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
861 {
862 	struct event_trigger_data *test;
863 
864 	list_for_each_entry(test, &named_triggers, named_list) {
865 		if (strcmp(test->name, data->name) == 0) {
866 			if (pause) {
867 				test->paused_tmp = test->paused;
868 				test->paused = true;
869 			} else {
870 				test->paused = test->paused_tmp;
871 			}
872 		}
873 	}
874 }
875 
/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}
889 
/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}
903 
/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data to point @data at
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
920 
/* Return the common named trigger data @data references, NULL if it owns its own */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
926 
/* traceon trigger: turn tracing on if it is currently off */
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		tracing_on();
}
936 
937 static void
938 traceon_count_trigger(struct event_trigger_data *data, void *rec,
939 		      struct ring_buffer_event *event)
940 {
941 	if (tracing_is_on())
942 		return;
943 
944 	if (!data->count)
945 		return;
946 
947 	if (data->count != -1)
948 		(data->count)--;
949 
950 	tracing_on();
951 }
952 
/* traceoff trigger: turn tracing off if it is currently on */
static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (tracing_is_on())
		tracing_off();
}
962 
963 static void
964 traceoff_count_trigger(struct event_trigger_data *data, void *rec,
965 		       struct ring_buffer_event *event)
966 {
967 	if (!tracing_is_on())
968 		return;
969 
970 	if (!data->count)
971 		return;
972 
973 	if (data->count != -1)
974 		(data->count)--;
975 
976 	tracing_off();
977 }
978 
/* seq_file printer for "traceon" triggers; delegates to the generic printer */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
986 
/* seq_file printer for "traceoff" triggers; delegates to the generic printer */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
994 
/* Ops for plain "traceon" (no count) */
static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "traceon:N" (counted) */
static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for plain "traceoff" (no count) */
static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Ops for "traceoff:N" (counted) */
static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1022 
1023 static struct event_trigger_ops *
1024 onoff_get_trigger_ops(char *cmd, char *param)
1025 {
1026 	struct event_trigger_ops *ops;
1027 
1028 	/* we register both traceon and traceoff to this callback */
1029 	if (strcmp(cmd, "traceon") == 0)
1030 		ops = param ? &traceon_count_trigger_ops :
1031 			&traceon_trigger_ops;
1032 	else
1033 		ops = param ? &traceoff_count_trigger_ops :
1034 			&traceoff_trigger_ops;
1035 
1036 	return ops;
1037 }
1038 
/* "traceon" event command */
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* "traceoff" event command; POST_TRIGGER so the triggering event is still logged */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1059 
1060 #ifdef CONFIG_TRACER_SNAPSHOT
1061 static void
1062 snapshot_trigger(struct event_trigger_data *data, void *rec,
1063 		 struct ring_buffer_event *event)
1064 {
1065 	struct trace_event_file *file = data->private_data;
1066 
1067 	if (file)
1068 		tracing_snapshot_instance(file->tr);
1069 	else
1070 		tracing_snapshot();
1071 }
1072 
1073 static void
1074 snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1075 		       struct ring_buffer_event *event)
1076 {
1077 	if (!data->count)
1078 		return;
1079 
1080 	if (data->count != -1)
1081 		(data->count)--;
1082 
1083 	snapshot_trigger(data, rec, event);
1084 }
1085 
1086 static int
1087 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1088 			  struct event_trigger_data *data,
1089 			  struct trace_event_file *file)
1090 {
1091 	int ret = register_trigger(glob, ops, data, file);
1092 
1093 	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1094 		unregister_trigger(glob, ops, data, file);
1095 		ret = 0;
1096 	}
1097 
1098 	return ret;
1099 }
1100 
/*
 * Show a registered snapshot trigger in the event's "trigger" file,
 * delegating to event_trigger_print() with the command name, the
 * remaining count and any filter string.
 */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
1108 
/* ops for an unconditional (uncounted) snapshot trigger */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1115 
/* ops for a snapshot trigger limited to a ":count=N" number of hits */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1122 
1123 static struct event_trigger_ops *
1124 snapshot_get_trigger_ops(char *cmd, char *param)
1125 {
1126 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1127 }
1128 
/*
 * "snapshot" trigger command.  Uses register_snapshot_trigger() as
 * its .reg so the snapshot buffer is set up when the trigger is added.
 */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1138 
1139 static __init int register_trigger_snapshot_cmd(void)
1140 {
1141 	int ret;
1142 
1143 	ret = register_event_command(&trigger_snapshot_cmd);
1144 	WARN_ON(ret < 0);
1145 
1146 	return ret;
1147 }
1148 #else
1149 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1150 #endif /* CONFIG_TRACER_SNAPSHOT */
1151 
1152 #ifdef CONFIG_STACKTRACE
1153 #ifdef CONFIG_UNWINDER_ORC
1154 /* Skip 2:
1155  *   event_triggers_post_call()
1156  *   trace_event_raw_event_xxx()
1157  */
1158 # define STACK_SKIP 2
1159 #else
1160 /*
1161  * Skip 4:
1162  *   stacktrace_trigger()
1163  *   event_triggers_post_call()
1164  *   trace_event_buffer_commit()
1165  *   trace_event_raw_event_xxx()
1166  */
1167 #define STACK_SKIP 4
1168 #endif
1169 
/*
 * Dump the current kernel stack into the trace buffer, skipping the
 * STACK_SKIP innermost frames (the trigger machinery itself) so the
 * dump starts at the code path that emitted the event.
 */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}
1176 
1177 static void
1178 stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1179 			 struct ring_buffer_event *event)
1180 {
1181 	if (!data->count)
1182 		return;
1183 
1184 	if (data->count != -1)
1185 		(data->count)--;
1186 
1187 	stacktrace_trigger(data, rec, event);
1188 }
1189 
/*
 * Show a registered stacktrace trigger in the event's "trigger" file,
 * delegating to event_trigger_print() with the command name, the
 * remaining count and any filter string.
 */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
1197 
/* ops for an unconditional (uncounted) stacktrace trigger */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1204 
/* ops for a stacktrace trigger limited to a ":count=N" number of hits */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1211 
1212 static struct event_trigger_ops *
1213 stacktrace_get_trigger_ops(char *cmd, char *param)
1214 {
1215 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1216 }
1217 
/*
 * "stacktrace" trigger command.  EVENT_CMD_FL_POST_TRIGGER runs it
 * after the event is committed (see the STACK_SKIP frame comments),
 * so the triggering event is already in the buffer when we dump.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1228 
1229 static __init int register_trigger_stacktrace_cmd(void)
1230 {
1231 	int ret;
1232 
1233 	ret = register_event_command(&trigger_stacktrace_cmd);
1234 	WARN_ON(ret < 0);
1235 
1236 	return ret;
1237 }
1238 #else
1239 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1240 #endif /* CONFIG_STACKTRACE */
1241 
/*
 * Undo register_trigger_traceon_traceoff_cmds(); used for rollback
 * when registering the pair partially fails.
 */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1247 
1248 static void
1249 event_enable_trigger(struct event_trigger_data *data, void *rec,
1250 		     struct ring_buffer_event *event)
1251 {
1252 	struct enable_trigger_data *enable_data = data->private_data;
1253 
1254 	if (enable_data->enable)
1255 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1256 	else
1257 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1258 }
1259 
1260 static void
1261 event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1262 			   struct ring_buffer_event *event)
1263 {
1264 	struct enable_trigger_data *enable_data = data->private_data;
1265 
1266 	if (!data->count)
1267 		return;
1268 
1269 	/* Skip if the event is in a state we want to switch to */
1270 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1271 		return;
1272 
1273 	if (data->count != -1)
1274 		(data->count)--;
1275 
1276 	event_enable_trigger(data, rec, event);
1277 }
1278 
1279 int event_enable_trigger_print(struct seq_file *m,
1280 			       struct event_trigger_ops *ops,
1281 			       struct event_trigger_data *data)
1282 {
1283 	struct enable_trigger_data *enable_data = data->private_data;
1284 
1285 	seq_printf(m, "%s:%s:%s",
1286 		   enable_data->hist ?
1287 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1288 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1289 		   enable_data->file->event_call->class->system,
1290 		   trace_event_name(enable_data->file->event_call));
1291 
1292 	if (data->count == -1)
1293 		seq_puts(m, ":unlimited");
1294 	else
1295 		seq_printf(m, ":count=%ld", data->count);
1296 
1297 	if (data->filter_str)
1298 		seq_printf(m, " if %s\n", data->filter_str);
1299 	else
1300 		seq_putc(m, '\n');
1301 
1302 	return 0;
1303 }
1304 
/*
 * Drop one reference to an enable/disable trigger.  On the final ref,
 * tear it down: drop the soft-mode enable taken at registration,
 * release the module reference pinning the target event, and free
 * both the trigger data and its enable_trigger_data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		/* Balances the try_module_get() done at registration */
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
1322 
/* ops for an uncounted "enable_event" trigger */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1329 
/* ops for a counted "enable_event" trigger (":count=N") */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1336 
/*
 * ops for an uncounted "disable_event" trigger; shares the callbacks
 * with the enable variant — direction is held in enable_trigger_data.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1343 
/* ops for a counted "disable_event" trigger (":count=N") */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1350 
1351 int event_enable_trigger_func(struct event_command *cmd_ops,
1352 			      struct trace_event_file *file,
1353 			      char *glob, char *cmd, char *param)
1354 {
1355 	struct trace_event_file *event_enable_file;
1356 	struct enable_trigger_data *enable_data;
1357 	struct event_trigger_data *trigger_data;
1358 	struct event_trigger_ops *trigger_ops;
1359 	struct trace_array *tr = file->tr;
1360 	const char *system;
1361 	const char *event;
1362 	bool hist = false;
1363 	char *trigger;
1364 	char *number;
1365 	bool enable;
1366 	int ret;
1367 
1368 	if (!param)
1369 		return -EINVAL;
1370 
1371 	/* separate the trigger from the filter (s:e:n [if filter]) */
1372 	trigger = strsep(&param, " \t");
1373 	if (!trigger)
1374 		return -EINVAL;
1375 
1376 	system = strsep(&trigger, ":");
1377 	if (!trigger)
1378 		return -EINVAL;
1379 
1380 	event = strsep(&trigger, ":");
1381 
1382 	ret = -EINVAL;
1383 	event_enable_file = find_event_file(tr, system, event);
1384 	if (!event_enable_file)
1385 		goto out;
1386 
1387 #ifdef CONFIG_HIST_TRIGGERS
1388 	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1389 		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1390 
1391 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1392 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1393 #else
1394 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1395 #endif
1396 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1397 
1398 	ret = -ENOMEM;
1399 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1400 	if (!trigger_data)
1401 		goto out;
1402 
1403 	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1404 	if (!enable_data) {
1405 		kfree(trigger_data);
1406 		goto out;
1407 	}
1408 
1409 	trigger_data->count = -1;
1410 	trigger_data->ops = trigger_ops;
1411 	trigger_data->cmd_ops = cmd_ops;
1412 	INIT_LIST_HEAD(&trigger_data->list);
1413 	RCU_INIT_POINTER(trigger_data->filter, NULL);
1414 
1415 	enable_data->hist = hist;
1416 	enable_data->enable = enable;
1417 	enable_data->file = event_enable_file;
1418 	trigger_data->private_data = enable_data;
1419 
1420 	if (glob[0] == '!') {
1421 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1422 		kfree(trigger_data);
1423 		kfree(enable_data);
1424 		ret = 0;
1425 		goto out;
1426 	}
1427 
1428 	/* Up the trigger_data count to make sure nothing frees it on failure */
1429 	event_trigger_init(trigger_ops, trigger_data);
1430 
1431 	if (trigger) {
1432 		number = strsep(&trigger, ":");
1433 
1434 		ret = -EINVAL;
1435 		if (!strlen(number))
1436 			goto out_free;
1437 
1438 		/*
1439 		 * We use the callback data field (which is a pointer)
1440 		 * as our counter.
1441 		 */
1442 		ret = kstrtoul(number, 0, &trigger_data->count);
1443 		if (ret)
1444 			goto out_free;
1445 	}
1446 
1447 	if (!param) /* if param is non-empty, it's supposed to be a filter */
1448 		goto out_reg;
1449 
1450 	if (!cmd_ops->set_filter)
1451 		goto out_reg;
1452 
1453 	ret = cmd_ops->set_filter(param, trigger_data, file);
1454 	if (ret < 0)
1455 		goto out_free;
1456 
1457  out_reg:
1458 	/* Don't let event modules unload while probe registered */
1459 	ret = try_module_get(event_enable_file->event_call->mod);
1460 	if (!ret) {
1461 		ret = -EBUSY;
1462 		goto out_free;
1463 	}
1464 
1465 	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1466 	if (ret < 0)
1467 		goto out_put;
1468 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1469 	/*
1470 	 * The above returns on success the # of functions enabled,
1471 	 * but if it didn't find any functions it returns zero.
1472 	 * Consider no functions a failure too.
1473 	 */
1474 	if (!ret) {
1475 		ret = -ENOENT;
1476 		goto out_disable;
1477 	} else if (ret < 0)
1478 		goto out_disable;
1479 	/* Just return zero, not the number of enabled functions */
1480 	ret = 0;
1481 	event_trigger_free(trigger_ops, trigger_data);
1482  out:
1483 	return ret;
1484 
1485  out_disable:
1486 	trace_event_enable_disable(event_enable_file, 0, 1);
1487  out_put:
1488 	module_put(event_enable_file->event_call->mod);
1489  out_free:
1490 	if (cmd_ops->set_filter)
1491 		cmd_ops->set_filter(NULL, trigger_data, NULL);
1492 	event_trigger_free(trigger_ops, trigger_data);
1493 	kfree(enable_data);
1494 	goto out;
1495 }
1496 
/*
 * Register an enable/disable trigger on @file.
 *
 * Returns the number of triggers registered (1) on success, 0 if
 * enabling the owning event failed afterwards, or a negative error
 * (-EEXIST for a duplicate targeting the same event, or an init
 * failure).  Caller must hold event_mutex.
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Reject a second trigger of the same type aimed at the same event */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* Roll back the list insertion if the event can't be enabled */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
1538 
/*
 * Remove from @file the enable/disable trigger matching @test (same
 * trigger type, same target event file), if one is registered.
 * The matched trigger's ->free() is called only after it has been
 * unlinked and the event trigger count dropped.  Caller must hold
 * event_mutex.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* 'unregistered' guards 'data', which is only valid after a break */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
1568 
1569 static struct event_trigger_ops *
1570 event_enable_get_trigger_ops(char *cmd, char *param)
1571 {
1572 	struct event_trigger_ops *ops;
1573 	bool enable;
1574 
1575 #ifdef CONFIG_HIST_TRIGGERS
1576 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1577 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1578 #else
1579 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1580 #endif
1581 	if (enable)
1582 		ops = param ? &event_enable_count_trigger_ops :
1583 			&event_enable_trigger_ops;
1584 	else
1585 		ops = param ? &event_disable_count_trigger_ops :
1586 			&event_disable_trigger_ops;
1587 
1588 	return ops;
1589 }
1590 
/* "enable_event" trigger command */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1600 
/* "disable_event" trigger command: same callbacks as enable_event */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1610 
/*
 * Undo register_trigger_enable_disable_cmds(); used for rollback when
 * registering the pair partially fails.
 */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1616 
1617 static __init int register_trigger_enable_disable_cmds(void)
1618 {
1619 	int ret;
1620 
1621 	ret = register_event_command(&trigger_enable_cmd);
1622 	if (WARN_ON(ret < 0))
1623 		return ret;
1624 	ret = register_event_command(&trigger_disable_cmd);
1625 	if (WARN_ON(ret < 0))
1626 		unregister_trigger_enable_disable_cmds();
1627 
1628 	return ret;
1629 }
1630 
1631 static __init int register_trigger_traceon_traceoff_cmds(void)
1632 {
1633 	int ret;
1634 
1635 	ret = register_event_command(&trigger_traceon_cmd);
1636 	if (WARN_ON(ret < 0))
1637 		return ret;
1638 	ret = register_event_command(&trigger_traceoff_cmd);
1639 	if (WARN_ON(ret < 0))
1640 		unregister_trigger_traceon_traceoff_cmds();
1641 
1642 	return ret;
1643 }
1644 
/*
 * Register all built-in trigger commands at boot.  Individual
 * failures WARN inside the helpers rather than aborting here, so
 * one broken command doesn't prevent the others from registering.
 * NOTE(review): the registration order appears deliberate — confirm
 * before reordering.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}
1656