1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_trigger - trace event triggers
4  *
5  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
7 
8 #include <linux/security.h>
9 #include <linux/module.h>
10 #include <linux/ctype.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/rculist.h>
14 
15 #include "trace.h"
16 
17 static LIST_HEAD(trigger_commands);
18 static DEFINE_MUTEX(trigger_cmd_mutex);
19 
/*
 * Free an event_trigger_data instance.
 *
 * Any filter attached via the command's set_filter() method is torn
 * down first (set_filter(NULL, ...) clears it), then we wait for all
 * in-flight tracepoint handlers to finish before kfree()ing, since
 * handlers may still be dereferencing @data under rcu_read_lock_sched().
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}
30 
31 /**
32  * event_triggers_call - Call triggers associated with a trace event
33  * @file: The trace_event_file associated with the event
34  * @rec: The trace entry for the event, NULL for unconditional invocation
35  *
36  * For each trigger associated with an event, invoke the trigger
37  * function registered with the associated trigger command.  If rec is
38  * non-NULL, it means that the trigger requires further processing and
39  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
40  * trigger has a filter associated with it, rec will checked against
41  * the filter and if the record matches the trigger will be invoked.
42  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43  * in any case until the current event is written, the trigger
44  * function isn't invoked but the bit associated with the deferred
45  * trigger is set in the return value.
46  *
47  * Returns an enum event_trigger_type value containing a set bit for
48  * any trigger that should be deferred, ETT_NONE if nothing to defer.
49  *
50  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51  *
52  * Return: an enum event_trigger_type value containing a set bit for
53  * any trigger that should be deferred, ETT_NONE if nothing to defer.
54  */
55 enum event_trigger_type
56 event_triggers_call(struct trace_event_file *file, void *rec,
57 		    struct ring_buffer_event *event)
58 {
59 	struct event_trigger_data *data;
60 	enum event_trigger_type tt = ETT_NONE;
61 	struct event_filter *filter;
62 
63 	if (list_empty(&file->triggers))
64 		return tt;
65 
66 	list_for_each_entry_rcu(data, &file->triggers, list) {
67 		if (data->paused)
68 			continue;
69 		if (!rec) {
70 			data->ops->func(data, rec, event);
71 			continue;
72 		}
73 		filter = rcu_dereference_sched(data->filter);
74 		if (filter && !filter_match_preds(filter, rec))
75 			continue;
76 		if (event_command_post_trigger(data->cmd_ops)) {
77 			tt |= data->cmd_ops->trigger_type;
78 			continue;
79 		}
80 		data->ops->func(data, rec, event);
81 	}
82 	return tt;
83 }
84 EXPORT_SYMBOL_GPL(event_triggers_call);
85 
86 /**
87  * event_triggers_post_call - Call 'post_triggers' for a trace event
88  * @file: The trace_event_file associated with the event
89  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
90  *
91  * For each trigger associated with an event, invoke the trigger
92  * function registered with the associated trigger command, if the
93  * corresponding bit is set in the tt enum passed into this function.
94  * See @event_triggers_call for details on how those bits are set.
95  *
96  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
97  */
98 void
99 event_triggers_post_call(struct trace_event_file *file,
100 			 enum event_trigger_type tt)
101 {
102 	struct event_trigger_data *data;
103 
104 	list_for_each_entry_rcu(data, &file->triggers, list) {
105 		if (data->paused)
106 			continue;
107 		if (data->cmd_ops->trigger_type & tt)
108 			data->ops->func(data, NULL, NULL);
109 	}
110 }
111 EXPORT_SYMBOL_GPL(event_triggers_post_call);
112 
/*
 * Sentinel iterator value used when the event has no triggers: makes
 * trigger_show() print the list of available trigger commands instead.
 */
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

/* seq_file ->next: advance through the event's trigger list */
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	/* The "available triggers" banner is a single-shot entry */
	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}
124 
/*
 * seq_file ->start: takes event_mutex, which is held across the whole
 * iteration and released by trigger_stop().
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);	/* event removed while file open */

	/* No triggers installed: emit the "available triggers" banner once */
	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
140 
/* seq_file ->stop: drops event_mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
145 
/*
 * seq_file ->show: print one installed trigger, or — for the sentinel
 * entry — the set of available trigger commands.
 */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* reverse order so the first-registered command prints first */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	/* Delegate to the trigger's own ->print method */
	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}
167 
/* seq_file ops for reading a per-event 'trigger' file */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
174 
/*
 * Common open handler for trigger files.
 *
 * Denies access under tracefs lockdown, handles O_TRUNC by removing
 * all existing triggers, and sets up the seq_file iterator for reads.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	/* Refuse access entirely when tracefs is locked down */
	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	/* The event may have been removed while the file was held open */
	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	/* O_TRUNC on a writable open clears every existing trigger */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		/*
		 * NOTE(review): trigger_commands is walked here without
		 * trigger_cmd_mutex; this appears to rely on commands only
		 * being (un)registered at __init — confirm.
		 */
		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			/* stash the struct file so trigger_next() can find the event */
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
215 
216 static int trigger_process_regex(struct trace_event_file *file, char *buff)
217 {
218 	char *command, *next = buff;
219 	struct event_command *p;
220 	int ret = -EINVAL;
221 
222 	command = strsep(&next, ": \t");
223 	command = (command[0] != '!') ? command : command + 1;
224 
225 	mutex_lock(&trigger_cmd_mutex);
226 	list_for_each_entry(p, &trigger_commands, list) {
227 		if (strcmp(p->name, command) == 0) {
228 			ret = p->func(p, file, buff, command, next);
229 			goto out_unlock;
230 		}
231 	}
232  out_unlock:
233 	mutex_unlock(&trigger_cmd_mutex);
234 
235 	return ret;
236 }
237 
/*
 * Common write handler for trigger files: copy the user buffer, trim
 * surrounding whitespace, and hand the command string to
 * trigger_process_regex() under event_mutex.
 *
 * Returns @cnt (all bytes consumed) on success, negative errno on error.
 */
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	/* A trigger command must fit in a single page */
	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		/* event removed while the file was open */
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}
277 
/*
 * Common release handler for trigger files: tears down the seq_file
 * iterator set up by event_trigger_regex_open() for readable opens.
 */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
289 
/* fops ->write for the per-event 'trigger' file */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
296 
/* fops ->open for the per-event 'trigger' file */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}
303 
/* fops ->release for the per-event 'trigger' file */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
309 
/* File operations for each event's 'trigger' file in tracefs */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
317 
318 /*
319  * Currently we only register event commands from __init, so mark this
320  * __init too.
321  */
322 __init int register_event_command(struct event_command *cmd)
323 {
324 	struct event_command *p;
325 	int ret = 0;
326 
327 	mutex_lock(&trigger_cmd_mutex);
328 	list_for_each_entry(p, &trigger_commands, list) {
329 		if (strcmp(cmd->name, p->name) == 0) {
330 			ret = -EBUSY;
331 			goto out_unlock;
332 		}
333 	}
334 	list_add(&cmd->list, &trigger_commands);
335  out_unlock:
336 	mutex_unlock(&trigger_cmd_mutex);
337 
338 	return ret;
339 }
340 
341 /*
342  * Currently we only unregister event commands from __init, so mark
343  * this __init too.
344  */
345 __init int unregister_event_command(struct event_command *cmd)
346 {
347 	struct event_command *p, *n;
348 	int ret = -ENODEV;
349 
350 	mutex_lock(&trigger_cmd_mutex);
351 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
352 		if (strcmp(cmd->name, p->name) == 0) {
353 			ret = 0;
354 			list_del_init(&p->list);
355 			goto out_unlock;
356 		}
357 	}
358  out_unlock:
359 	mutex_unlock(&trigger_cmd_mutex);
360 
361 	return ret;
362 }
363 
364 /**
365  * event_trigger_print - Generic event_trigger_ops @print implementation
366  * @name: The name of the event trigger
367  * @m: The seq_file being printed to
368  * @data: Trigger-specific data
369  * @filter_str: filter_str to print, if present
370  *
371  * Common implementation for event triggers to print themselves.
372  *
373  * Usually wrapped by a function that simply sets the @name of the
374  * trigger command and then invokes this.
375  *
376  * Return: 0 on success, errno otherwise
377  */
378 static int
379 event_trigger_print(const char *name, struct seq_file *m,
380 		    void *data, char *filter_str)
381 {
382 	long count = (long)data;
383 
384 	seq_puts(m, name);
385 
386 	if (count == -1)
387 		seq_puts(m, ":unlimited");
388 	else
389 		seq_printf(m, ":count=%ld", count);
390 
391 	if (filter_str)
392 		seq_printf(m, " if %s\n", filter_str);
393 	else
394 		seq_putc(m, '\n');
395 
396 	return 0;
397 }
398 
399 /**
400  * event_trigger_init - Generic event_trigger_ops @init implementation
401  * @ops: The trigger ops associated with the trigger
402  * @data: Trigger-specific data
403  *
404  * Common implementation of event trigger initialization.
405  *
406  * Usually used directly as the @init method in event trigger
407  * implementations.
408  *
409  * Return: 0 on success, errno otherwise
410  */
411 int event_trigger_init(struct event_trigger_ops *ops,
412 		       struct event_trigger_data *data)
413 {
414 	data->ref++;
415 	return 0;
416 }
417 
418 /**
419  * event_trigger_free - Generic event_trigger_ops @free implementation
420  * @ops: The trigger ops associated with the trigger
421  * @data: Trigger-specific data
422  *
423  * Common implementation of event trigger de-initialization.
424  *
425  * Usually used directly as the @free method in event trigger
426  * implementations.
427  */
428 static void
429 event_trigger_free(struct event_trigger_ops *ops,
430 		   struct event_trigger_data *data)
431 {
432 	if (WARN_ON_ONCE(data->ref <= 0))
433 		return;
434 
435 	data->ref--;
436 	if (!data->ref)
437 		trigger_data_free(data);
438 }
439 
/*
 * Enable or disable an event on behalf of a trigger.
 *
 * tm_ref counts the trigger users of the event; only the first enable
 * and the last disable actually flip TRIGGER_MODE and call
 * trace_event_enable_disable() on the event itself.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		/* only the first trigger user enables the event */
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		/* only the last trigger user disables the event */
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
459 
460 /**
461  * clear_event_triggers - Clear all triggers associated with a trace array
462  * @tr: The trace array to clear
463  *
464  * For each trigger, the triggering event has its tm_ref decremented
465  * via trace_event_trigger_enable_disable(), and any associated event
466  * (in the case of enable/disable_event triggers) will have its sm_ref
467  * decremented via free()->trace_event_enable_disable().  That
468  * combination effectively reverses the soft-mode/trigger state added
469  * by trigger registration.
470  *
471  * Must be called with event_mutex held.
472  */
473 void
474 clear_event_triggers(struct trace_array *tr)
475 {
476 	struct trace_event_file *file;
477 
478 	list_for_each_entry(file, &tr->events, list) {
479 		struct event_trigger_data *data, *n;
480 		list_for_each_entry_safe(data, n, &file->triggers, list) {
481 			trace_event_trigger_enable_disable(file, 0);
482 			list_del_rcu(&data->list);
483 			if (data->ops->free)
484 				data->ops->free(data->ops, data);
485 		}
486 	}
487 }
488 
489 /**
490  * update_cond_flag - Set or reset the TRIGGER_COND bit
491  * @file: The trace_event_file associated with the event
492  *
493  * If an event has triggers and any of those triggers has a filter or
494  * a post_trigger, trigger invocation needs to be deferred until after
495  * the current event has logged its data, and the event should have
496  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
497  * cleared.
498  */
499 void update_cond_flag(struct trace_event_file *file)
500 {
501 	struct event_trigger_data *data;
502 	bool set_cond = false;
503 
504 	lockdep_assert_held(&event_mutex);
505 
506 	list_for_each_entry(data, &file->triggers, list) {
507 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
508 		    event_command_needs_rec(data->cmd_ops)) {
509 			set_cond = true;
510 			break;
511 		}
512 	}
513 
514 	if (set_cond)
515 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
516 	else
517 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
518 }
519 
520 /**
521  * register_trigger - Generic event_command @reg implementation
522  * @glob: The raw string used to register the trigger
523  * @ops: The trigger ops associated with the trigger
524  * @data: Trigger-specific data to associate with the trigger
525  * @file: The trace_event_file associated with the event
526  *
527  * Common implementation for event trigger registration.
528  *
529  * Usually used directly as the @reg method in event command
530  * implementations.
531  *
532  * Return: 0 on success, errno otherwise
533  */
534 static int register_trigger(char *glob, struct event_trigger_ops *ops,
535 			    struct event_trigger_data *data,
536 			    struct trace_event_file *file)
537 {
538 	struct event_trigger_data *test;
539 	int ret = 0;
540 
541 	lockdep_assert_held(&event_mutex);
542 
543 	list_for_each_entry(test, &file->triggers, list) {
544 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
545 			ret = -EEXIST;
546 			goto out;
547 		}
548 	}
549 
550 	if (data->ops->init) {
551 		ret = data->ops->init(data->ops, data);
552 		if (ret < 0)
553 			goto out;
554 	}
555 
556 	list_add_rcu(&data->list, &file->triggers);
557 	ret++;
558 
559 	update_cond_flag(file);
560 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
561 		list_del_rcu(&data->list);
562 		update_cond_flag(file);
563 		ret--;
564 	}
565 out:
566 	return ret;
567 }
568 
569 /**
570  * unregister_trigger - Generic event_command @unreg implementation
571  * @glob: The raw string used to register the trigger
572  * @ops: The trigger ops associated with the trigger
573  * @test: Trigger-specific data used to find the trigger to remove
574  * @file: The trace_event_file associated with the event
575  *
576  * Common implementation for event trigger unregistration.
577  *
578  * Usually used directly as the @unreg method in event command
579  * implementations.
580  */
581 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
582 			       struct event_trigger_data *test,
583 			       struct trace_event_file *file)
584 {
585 	struct event_trigger_data *data;
586 	bool unregistered = false;
587 
588 	lockdep_assert_held(&event_mutex);
589 
590 	list_for_each_entry(data, &file->triggers, list) {
591 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
592 			unregistered = true;
593 			list_del_rcu(&data->list);
594 			trace_event_trigger_enable_disable(file, 0);
595 			update_cond_flag(file);
596 			break;
597 		}
598 	}
599 
600 	if (unregistered && data->ops->free)
601 		data->ops->free(data->ops, data);
602 }
603 
604 /**
605  * event_trigger_callback - Generic event_command @func implementation
606  * @cmd_ops: The command ops, used for trigger registration
607  * @file: The trace_event_file associated with the event
608  * @glob: The raw string used to register the trigger
609  * @cmd: The cmd portion of the string used to register the trigger
610  * @param: The params portion of the string used to register the trigger
611  *
612  * Common implementation for event command parsing and trigger
613  * instantiation.
614  *
615  * Usually used directly as the @func method in event command
616  * implementations.
617  *
618  * Return: 0 on success, errno otherwise
619  */
620 static int
621 event_trigger_callback(struct event_command *cmd_ops,
622 		       struct trace_event_file *file,
623 		       char *glob, char *cmd, char *param)
624 {
625 	struct event_trigger_data *trigger_data;
626 	struct event_trigger_ops *trigger_ops;
627 	char *trigger = NULL;
628 	char *number;
629 	int ret;
630 
631 	/* separate the trigger from the filter (t:n [if filter]) */
632 	if (param && isdigit(param[0]))
633 		trigger = strsep(&param, " \t");
634 
635 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
636 
637 	ret = -ENOMEM;
638 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
639 	if (!trigger_data)
640 		goto out;
641 
642 	trigger_data->count = -1;
643 	trigger_data->ops = trigger_ops;
644 	trigger_data->cmd_ops = cmd_ops;
645 	trigger_data->private_data = file;
646 	INIT_LIST_HEAD(&trigger_data->list);
647 	INIT_LIST_HEAD(&trigger_data->named_list);
648 
649 	if (glob[0] == '!') {
650 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
651 		kfree(trigger_data);
652 		ret = 0;
653 		goto out;
654 	}
655 
656 	if (trigger) {
657 		number = strsep(&trigger, ":");
658 
659 		ret = -EINVAL;
660 		if (!strlen(number))
661 			goto out_free;
662 
663 		/*
664 		 * We use the callback data field (which is a pointer)
665 		 * as our counter.
666 		 */
667 		ret = kstrtoul(number, 0, &trigger_data->count);
668 		if (ret)
669 			goto out_free;
670 	}
671 
672 	if (!param) /* if param is non-empty, it's supposed to be a filter */
673 		goto out_reg;
674 
675 	if (!cmd_ops->set_filter)
676 		goto out_reg;
677 
678 	ret = cmd_ops->set_filter(param, trigger_data, file);
679 	if (ret < 0)
680 		goto out_free;
681 
682  out_reg:
683 	/* Up the trigger_data count to make sure reg doesn't free it on failure */
684 	event_trigger_init(trigger_ops, trigger_data);
685 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
686 	/*
687 	 * The above returns on success the # of functions enabled,
688 	 * but if it didn't find any functions it returns zero.
689 	 * Consider no functions a failure too.
690 	 */
691 	if (!ret) {
692 		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
693 		ret = -ENOENT;
694 	} else if (ret > 0)
695 		ret = 0;
696 
697 	/* Down the counter of trigger_data or free it if not used anymore */
698 	event_trigger_free(trigger_ops, trigger_data);
699  out:
700 	return ret;
701 
702  out_free:
703 	if (cmd_ops->set_filter)
704 		cmd_ops->set_filter(NULL, trigger_data, NULL);
705 	kfree(trigger_data);
706 	goto out;
707 }
708 
709 /**
710  * set_trigger_filter - Generic event_command @set_filter implementation
711  * @filter_str: The filter string for the trigger, NULL to remove filter
712  * @trigger_data: Trigger-specific data
713  * @file: The trace_event_file associated with the event
714  *
715  * Common implementation for event command filter parsing and filter
716  * instantiation.
717  *
718  * Usually used directly as the @set_filter method in event command
719  * implementations.
720  *
721  * Also used to remove a filter (if filter_str = NULL).
722  *
723  * Return: 0 on success, errno otherwise
724  */
725 int set_trigger_filter(char *filter_str,
726 		       struct event_trigger_data *trigger_data,
727 		       struct trace_event_file *file)
728 {
729 	struct event_trigger_data *data = trigger_data;
730 	struct event_filter *filter = NULL, *tmp;
731 	int ret = -EINVAL;
732 	char *s;
733 
734 	if (!filter_str) /* clear the current filter */
735 		goto assign;
736 
737 	s = strsep(&filter_str, " \t");
738 
739 	if (!strlen(s) || strcmp(s, "if") != 0)
740 		goto out;
741 
742 	if (!filter_str)
743 		goto out;
744 
745 	/* The filter is for the 'trigger' event, not the triggered event */
746 	ret = create_event_filter(file->tr, file->event_call,
747 				  filter_str, false, &filter);
748 	/*
749 	 * If create_event_filter() fails, filter still needs to be freed.
750 	 * Which the calling code will do with data->filter.
751 	 */
752  assign:
753 	tmp = rcu_access_pointer(data->filter);
754 
755 	rcu_assign_pointer(data->filter, filter);
756 
757 	if (tmp) {
758 		/* Make sure the call is done with the filter */
759 		tracepoint_synchronize_unregister();
760 		free_event_filter(tmp);
761 	}
762 
763 	kfree(data->filter_str);
764 	data->filter_str = NULL;
765 
766 	if (filter_str) {
767 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
768 		if (!data->filter_str) {
769 			free_event_filter(rcu_access_pointer(data->filter));
770 			data->filter = NULL;
771 			ret = -ENOMEM;
772 		}
773 	}
774  out:
775 	return ret;
776 }
777 
778 static LIST_HEAD(named_triggers);
779 
780 /**
781  * find_named_trigger - Find the common named trigger associated with @name
782  * @name: The name of the set of named triggers to find the common data for
783  *
784  * Named triggers are sets of triggers that share a common set of
785  * trigger data.  The first named trigger registered with a given name
786  * owns the common trigger data that the others subsequently
787  * registered with the same name will reference.  This function
788  * returns the common trigger data associated with that first
789  * registered instance.
790  *
791  * Return: the common trigger data for the given named trigger on
792  * success, NULL otherwise.
793  */
794 struct event_trigger_data *find_named_trigger(const char *name)
795 {
796 	struct event_trigger_data *data;
797 
798 	if (!name)
799 		return NULL;
800 
801 	list_for_each_entry(data, &named_triggers, named_list) {
802 		if (data->named_data)
803 			continue;
804 		if (strcmp(data->name, name) == 0)
805 			return data;
806 	}
807 
808 	return NULL;
809 }
810 
811 /**
812  * is_named_trigger - determine if a given trigger is a named trigger
813  * @test: The trigger data to test
814  *
815  * Return: true if 'test' is a named trigger, false otherwise.
816  */
817 bool is_named_trigger(struct event_trigger_data *test)
818 {
819 	struct event_trigger_data *data;
820 
821 	list_for_each_entry(data, &named_triggers, named_list) {
822 		if (test == data)
823 			return true;
824 	}
825 
826 	return false;
827 }
828 
829 /**
830  * save_named_trigger - save the trigger in the named trigger list
831  * @name: The name of the named trigger set
832  * @data: The trigger data to save
833  *
834  * Return: 0 if successful, negative error otherwise.
835  */
836 int save_named_trigger(const char *name, struct event_trigger_data *data)
837 {
838 	data->name = kstrdup(name, GFP_KERNEL);
839 	if (!data->name)
840 		return -ENOMEM;
841 
842 	list_add(&data->named_list, &named_triggers);
843 
844 	return 0;
845 }
846 
847 /**
848  * del_named_trigger - delete a trigger from the named trigger list
849  * @data: The trigger data to delete
850  */
851 void del_named_trigger(struct event_trigger_data *data)
852 {
853 	kfree(data->name);
854 	data->name = NULL;
855 
856 	list_del(&data->named_list);
857 }
858 
859 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
860 {
861 	struct event_trigger_data *test;
862 
863 	list_for_each_entry(test, &named_triggers, named_list) {
864 		if (strcmp(test->name, data->name) == 0) {
865 			if (pause) {
866 				test->paused_tmp = test->paused;
867 				test->paused = true;
868 			} else {
869 				test->paused = test->paused_tmp;
870 			}
871 		}
872 	}
873 }
874 
875 /**
876  * pause_named_trigger - Pause all named triggers with the same name
877  * @data: The trigger data of a named trigger to pause
878  *
879  * Pauses a named trigger along with all other triggers having the
880  * same name.  Because named triggers share a common set of data,
881  * pausing only one is meaningless, so pausing one named trigger needs
882  * to pause all triggers with the same name.
883  */
884 void pause_named_trigger(struct event_trigger_data *data)
885 {
886 	__pause_named_trigger(data, true);
887 }
888 
889 /**
890  * unpause_named_trigger - Un-pause all named triggers with the same name
891  * @data: The trigger data of a named trigger to unpause
892  *
893  * Un-pauses a named trigger along with all other triggers having the
894  * same name.  Because named triggers share a common set of data,
895  * unpausing only one is meaningless, so unpausing one named trigger
896  * needs to unpause all triggers with the same name.
897  */
898 void unpause_named_trigger(struct event_trigger_data *data)
899 {
900 	__pause_named_trigger(data, false);
901 }
902 
903 /**
904  * set_named_trigger_data - Associate common named trigger data
905  * @data: The trigger data of a named trigger to unpause
906  *
907  * Named triggers are sets of triggers that share a common set of
908  * trigger data.  The first named trigger registered with a given name
909  * owns the common trigger data that the others subsequently
910  * registered with the same name will reference.  This function
911  * associates the common trigger data from the first trigger with the
912  * given trigger.
913  */
914 void set_named_trigger_data(struct event_trigger_data *data,
915 			    struct event_trigger_data *named_data)
916 {
917 	data->named_data = named_data;
918 }
919 
/* Return the common named trigger data @data references, or NULL */
struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
925 
/* 'traceon' trigger: turn tracing on, if it isn't on already */
static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		tracing_on();
}
935 
936 static void
937 traceon_count_trigger(struct event_trigger_data *data, void *rec,
938 		      struct ring_buffer_event *event)
939 {
940 	if (tracing_is_on())
941 		return;
942 
943 	if (!data->count)
944 		return;
945 
946 	if (data->count != -1)
947 		(data->count)--;
948 
949 	tracing_on();
950 }
951 
/* 'traceoff' trigger: turn tracing off, if it isn't off already */
static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (tracing_is_on())
		tracing_off();
}
961 
962 static void
963 traceoff_count_trigger(struct event_trigger_data *data, void *rec,
964 		       struct ring_buffer_event *event)
965 {
966 	if (!tracing_is_on())
967 		return;
968 
969 	if (!data->count)
970 		return;
971 
972 	if (data->count != -1)
973 		(data->count)--;
974 
975 	tracing_off();
976 }
977 
/* ->print for traceon triggers: 'traceon[:count=n][ if filter]' */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
985 
/* ->print for traceoff triggers: 'traceoff[:count=n][ if filter]' */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
993 
/*
 * trigger_ops for the traceon/traceoff commands.  The _count variants
 * are selected by onoff_get_trigger_ops() when the user supplies a
 * ':n' count parameter.
 */
static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1021 
1022 static struct event_trigger_ops *
1023 onoff_get_trigger_ops(char *cmd, char *param)
1024 {
1025 	struct event_trigger_ops *ops;
1026 
1027 	/* we register both traceon and traceoff to this callback */
1028 	if (strcmp(cmd, "traceon") == 0)
1029 		ops = param ? &traceon_count_trigger_ops :
1030 			&traceon_trigger_ops;
1031 	else
1032 		ops = param ? &traceoff_count_trigger_ops :
1033 			&traceoff_trigger_ops;
1034 
1035 	return ops;
1036 }
1037 
/* 'traceon' event command: runs immediately when the event fires */
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/*
 * 'traceoff' event command: POST_TRIGGER defers it until after the
 * current event has been written (see event_triggers_call()).
 */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1058 
1059 #ifdef CONFIG_TRACER_SNAPSHOT
1060 static void
1061 snapshot_trigger(struct event_trigger_data *data, void *rec,
1062 		 struct ring_buffer_event *event)
1063 {
1064 	struct trace_event_file *file = data->private_data;
1065 
1066 	if (file)
1067 		tracing_snapshot_instance(file->tr);
1068 	else
1069 		tracing_snapshot();
1070 }
1071 
/*
 * 'snapshot:n' trigger: snapshot for at most n invocations.
 * A count of -1 means unlimited; a count of 0 means exhausted.
 */
static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}
1084 
1085 static int
1086 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1087 			  struct event_trigger_data *data,
1088 			  struct trace_event_file *file)
1089 {
1090 	int ret = register_trigger(glob, ops, data, file);
1091 
1092 	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1093 		unregister_trigger(glob, ops, data, file);
1094 		ret = 0;
1095 	}
1096 
1097 	return ret;
1098 }
1099 
1100 static int
1101 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1102 		       struct event_trigger_data *data)
1103 {
1104 	return event_trigger_print("snapshot", m, (void *)data->count,
1105 				   data->filter_str);
1106 }
1107 
/* ops for a plain "snapshot" trigger (fires on every matching event) */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* ops for "snapshot:N" (fires only until the count is exhausted) */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1121 
1122 static struct event_trigger_ops *
1123 snapshot_get_trigger_ops(char *cmd, char *param)
1124 {
1125 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1126 }
1127 
/* "snapshot" trigger command: take a tracing snapshot when the event fires */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1137 
1138 static __init int register_trigger_snapshot_cmd(void)
1139 {
1140 	int ret;
1141 
1142 	ret = register_event_command(&trigger_snapshot_cmd);
1143 	WARN_ON(ret < 0);
1144 
1145 	return ret;
1146 }
1147 #else
1148 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1149 #endif /* CONFIG_TRACER_SNAPSHOT */
1150 
1151 #ifdef CONFIG_STACKTRACE
1152 #ifdef CONFIG_UNWINDER_ORC
1153 /* Skip 2:
1154  *   event_triggers_post_call()
1155  *   trace_event_raw_event_xxx()
1156  */
1157 # define STACK_SKIP 2
1158 #else
1159 /*
1160  * Skip 4:
1161  *   stacktrace_trigger()
1162  *   event_triggers_post_call()
1163  *   trace_event_buffer_commit()
1164  *   trace_event_raw_event_xxx()
1165  */
1166 #define STACK_SKIP 4
1167 #endif
1168 
/*
 * stacktrace_trigger - trigger function for the "stacktrace" command
 *
 * Dumps the current stack into the trace buffer, skipping STACK_SKIP
 * internal frames (see the STACK_SKIP definition above).  The skip
 * count depends on this exact call chain, so do not add or remove
 * call frames here without adjusting STACK_SKIP.
 */
static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}
1175 
1176 static void
1177 stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1178 			 struct ring_buffer_event *event)
1179 {
1180 	if (!data->count)
1181 		return;
1182 
1183 	if (data->count != -1)
1184 		(data->count)--;
1185 
1186 	stacktrace_trigger(data, rec, event);
1187 }
1188 
1189 static int
1190 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1191 			 struct event_trigger_data *data)
1192 {
1193 	return event_trigger_print("stacktrace", m, (void *)data->count,
1194 				   data->filter_str);
1195 }
1196 
/* ops for a plain "stacktrace" trigger (fires on every matching event) */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* ops for "stacktrace:N" (fires only until the count is exhausted) */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1210 
1211 static struct event_trigger_ops *
1212 stacktrace_get_trigger_ops(char *cmd, char *param)
1213 {
1214 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1215 }
1216 
/*
 * "stacktrace" trigger command: dump a stack trace when the event fires.
 * POST_TRIGGER so the dump runs after the triggering event is written,
 * matching the call chain assumed by STACK_SKIP above.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1227 
1228 static __init int register_trigger_stacktrace_cmd(void)
1229 {
1230 	int ret;
1231 
1232 	ret = register_event_command(&trigger_stacktrace_cmd);
1233 	WARN_ON(ret < 0);
1234 
1235 	return ret;
1236 }
1237 #else
1238 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1239 #endif /* CONFIG_STACKTRACE */
1240 
1241 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1242 {
1243 	unregister_event_command(&trigger_traceon_cmd);
1244 	unregister_event_command(&trigger_traceoff_cmd);
1245 }
1246 
1247 static void
1248 event_enable_trigger(struct event_trigger_data *data, void *rec,
1249 		     struct ring_buffer_event *event)
1250 {
1251 	struct enable_trigger_data *enable_data = data->private_data;
1252 
1253 	if (enable_data->enable)
1254 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1255 	else
1256 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1257 }
1258 
1259 static void
1260 event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1261 			   struct ring_buffer_event *event)
1262 {
1263 	struct enable_trigger_data *enable_data = data->private_data;
1264 
1265 	if (!data->count)
1266 		return;
1267 
1268 	/* Skip if the event is in a state we want to switch to */
1269 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1270 		return;
1271 
1272 	if (data->count != -1)
1273 		(data->count)--;
1274 
1275 	event_enable_trigger(data, rec, event);
1276 }
1277 
1278 int event_enable_trigger_print(struct seq_file *m,
1279 			       struct event_trigger_ops *ops,
1280 			       struct event_trigger_data *data)
1281 {
1282 	struct enable_trigger_data *enable_data = data->private_data;
1283 
1284 	seq_printf(m, "%s:%s:%s",
1285 		   enable_data->hist ?
1286 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1287 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1288 		   enable_data->file->event_call->class->system,
1289 		   trace_event_name(enable_data->file->event_call));
1290 
1291 	if (data->count == -1)
1292 		seq_puts(m, ":unlimited");
1293 	else
1294 		seq_printf(m, ":count=%ld", data->count);
1295 
1296 	if (data->filter_str)
1297 		seq_printf(m, " if %s\n", data->filter_str);
1298 	else
1299 		seq_putc(m, '\n');
1300 
1301 	return 0;
1302 }
1303 
/*
 * event_enable_trigger_free - drop a reference on an enable_event trigger
 * @ops:	The trigger ops (unused)
 * @data:	The trigger instance
 *
 * When the last reference is dropped, take the target event back out
 * of SOFT_MODE, release the module reference taken at registration
 * time, and free both the trigger data and its enable_trigger_data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	/* Freeing with no outstanding references is a bug. */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}
1321 
/*
 * The enable and disable variants share the same callbacks; the
 * direction is carried in enable_trigger_data->enable, set when the
 * trigger is parsed in event_enable_trigger_func().
 */

/* ops for a plain enable_event trigger */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* ops for enable_event with a ":N" count */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* ops for a plain disable_event trigger */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* ops for disable_event with a ":N" count */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1349 
/*
 * event_enable_trigger_func - parse and register/unregister an
 * enable_event/disable_event (or enable_hist/disable_hist) trigger
 * @cmd_ops:	The event_command for this trigger type
 * @file:	The event file the trigger string was written to
 * @glob:	The full trigger string; a leading '!' means removal
 * @cmd:	The command name (e.g. ENABLE_EVENT_STR)
 * @param:	"system:event[:count] [if filter]" following the command
 *
 * Returns 0 on success (registration or removal), negative error
 * otherwise.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	/* Split the target spec "system:event[:count]" */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	/* The event to be enabled/disabled, not the triggering event */
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;	/* default: unlimited */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		/*
		 * Removal: the structures built above are only used as a
		 * template to match the registered trigger.  unreg frees
		 * the real one; the temporaries are freed here.
		 */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		/* Whatever follows "system:event:" is the count */
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Soft-enable the target event; undone in event_enable_trigger_free() */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	/* Drop the extra reference taken by event_trigger_init() above */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	/* Drops the init reference; frees trigger_data when it hits zero */
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}
1495 
/*
 * event_enable_register_trigger - register an enable/disable_event trigger
 * @glob:	The raw trigger string (unused here)
 * @ops:	The trigger ops (unused; data->ops is used instead)
 * @data:	The trigger instance to add to @file
 * @file:	The event file the trigger is attached to
 *
 * A duplicate is a registered trigger of the same type targeting the
 * same event file; two enable_event triggers with different targets
 * may coexist on one file.
 *
 * Returns the number of triggers registered: 1 on success, 0 if
 * enabling triggers on @file failed, -EEXIST on a duplicate, or a
 * negative error from the init callback.
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Reject a second trigger of the same type with the same target. */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* Roll back the list insertion if the trigger can't be enabled. */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
1537 
/*
 * event_enable_unregister_trigger - remove an enable/disable_event trigger
 * @glob:	The raw trigger string (unused here)
 * @ops:	The trigger ops (unused here)
 * @test:	A template trigger carrying the type and target file to match
 * @file:	The event file to remove the trigger from
 *
 * Finds the registered trigger on @file with the same trigger type and
 * the same target event file as @test, unlinks it, and frees it via
 * its own ops->free callback.  Does nothing if no match is found.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* After the break, 'data' still points at the matched entry. */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
1567 
1568 static struct event_trigger_ops *
1569 event_enable_get_trigger_ops(char *cmd, char *param)
1570 {
1571 	struct event_trigger_ops *ops;
1572 	bool enable;
1573 
1574 #ifdef CONFIG_HIST_TRIGGERS
1575 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1576 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1577 #else
1578 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1579 #endif
1580 	if (enable)
1581 		ops = param ? &event_enable_count_trigger_ops :
1582 			&event_enable_trigger_ops;
1583 	else
1584 		ops = param ? &event_disable_count_trigger_ops :
1585 			&event_disable_trigger_ops;
1586 
1587 	return ops;
1588 }
1589 
/* "enable_event" trigger command: soft-enable another event when this fires */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* "disable_event" trigger command: soft-disable another event when this fires */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1609 
1610 static __init void unregister_trigger_enable_disable_cmds(void)
1611 {
1612 	unregister_event_command(&trigger_enable_cmd);
1613 	unregister_event_command(&trigger_disable_cmd);
1614 }
1615 
1616 static __init int register_trigger_enable_disable_cmds(void)
1617 {
1618 	int ret;
1619 
1620 	ret = register_event_command(&trigger_enable_cmd);
1621 	if (WARN_ON(ret < 0))
1622 		return ret;
1623 	ret = register_event_command(&trigger_disable_cmd);
1624 	if (WARN_ON(ret < 0))
1625 		unregister_trigger_enable_disable_cmds();
1626 
1627 	return ret;
1628 }
1629 
1630 static __init int register_trigger_traceon_traceoff_cmds(void)
1631 {
1632 	int ret;
1633 
1634 	ret = register_event_command(&trigger_traceon_cmd);
1635 	if (WARN_ON(ret < 0))
1636 		return ret;
1637 	ret = register_event_command(&trigger_traceoff_cmd);
1638 	if (WARN_ON(ret < 0))
1639 		unregister_trigger_traceon_traceoff_cmds();
1640 
1641 	return ret;
1642 }
1643 
/*
 * register_trigger_cmds - register all built-in trigger commands
 *
 * Called at kernel init.  Failures are reported via WARN_ON inside the
 * individual helpers; one command failing to register does not stop
 * the remaining commands from being registered.
 *
 * Always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}
1655