xref: /openbmc/linux/kernel/trace/trace_events_trigger.c (revision ca90578000afb0d8f177ea36f7259a9c3640cf49)
1 /*
2  * trace_events_trigger - trace event triggers
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/rculist.h>
26 
27 #include "trace.h"
28 
29 static LIST_HEAD(trigger_commands);
30 static DEFINE_MUTEX(trigger_cmd_mutex);
31 
32 void trigger_data_free(struct event_trigger_data *data)
33 {
34 	if (data->cmd_ops->set_filter)
35 		data->cmd_ops->set_filter(NULL, data, NULL);
36 
37 	synchronize_sched(); /* make sure current triggers exit before free */
38 	kfree(data);
39 }
40 
41 /**
42  * event_triggers_call - Call triggers associated with a trace event
43  * @file: The trace_event_file associated with the event
44  * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The event meta data in the ring buffer
45  *
46  * For each trigger associated with an event, invoke the trigger
47  * function registered with the associated trigger command.  If rec is
48  * non-NULL, it means that the trigger requires further processing and
49  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
50  * trigger has a filter associated with it, rec will be checked against
51  * the filter and if the record matches the trigger will be invoked.
52  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53  * unless and until the current event is written, the trigger
54  * function isn't invoked but the bit associated with the deferred
55  * trigger is set in the return value.
56  *
60  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61  *
62  * Return: an enum event_trigger_type value containing a set bit for
63  * any trigger that should be deferred, ETT_NONE if nothing to defer.
64  */
65 enum event_trigger_type
66 event_triggers_call(struct trace_event_file *file, void *rec,
67 		    struct ring_buffer_event *event)
68 {
69 	struct event_trigger_data *data;
70 	enum event_trigger_type tt = ETT_NONE;
71 	struct event_filter *filter;
72 
73 	if (list_empty(&file->triggers))
74 		return tt;
75 
76 	list_for_each_entry_rcu(data, &file->triggers, list) {
77 		if (data->paused)
78 			continue;
79 		if (!rec) {
80 			data->ops->func(data, rec, event);
81 			continue;
82 		}
83 		filter = rcu_dereference_sched(data->filter);
84 		if (filter && !filter_match_preds(filter, rec))
85 			continue;
86 		if (event_command_post_trigger(data->cmd_ops)) {
87 			tt |= data->cmd_ops->trigger_type;
88 			continue;
89 		}
90 		data->ops->func(data, rec, event);
91 	}
92 	return tt;
93 }
94 EXPORT_SYMBOL_GPL(event_triggers_call);
95 
96 /**
97  * event_triggers_post_call - Call 'post_triggers' for a trace event
98  * @file: The trace_event_file associated with the event
99  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
100  * @rec: The trace entry for the event
 * @event: The event meta data in the ring buffer
101  *
102  * For each trigger associated with an event, invoke the trigger
103  * function registered with the associated trigger command, if the
104  * corresponding bit is set in the tt enum passed into this function.
105  * See event_triggers_call() for details on how those bits are set.
106  *
107  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
108  */
109 void
110 event_triggers_post_call(struct trace_event_file *file,
111 			 enum event_trigger_type tt,
112 			 void *rec, struct ring_buffer_event *event)
113 {
114 	struct event_trigger_data *data;
115 
116 	list_for_each_entry_rcu(data, &file->triggers, list) {
117 		if (data->paused)
118 			continue;
119 		if (data->cmd_ops->trigger_type & tt)
120 			data->ops->func(data, rec, event);
121 	}
122 }
123 EXPORT_SYMBOL_GPL(event_triggers_post_call);
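
/*
 * Typical caller pattern (a sketch only, based on the event commit
 * helpers in trace.h such as __event_trigger_test_discard() and
 * event_trigger_unlock_commit(); the details here are illustrative):
 * event_triggers_call() runs the non-deferred triggers and collects the
 * deferred set, and once the event has been written to the ring buffer
 * the caller hands that set to event_triggers_post_call().
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, entry, event);
 *	...			(commit the event to the ring buffer)
 *	if (tt)
 *		event_triggers_post_call(file, tt, entry, event);
 */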
124 
125 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
126 
127 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
128 {
129 	struct trace_event_file *event_file = event_file_data(m->private);
130 
131 	if (t == SHOW_AVAILABLE_TRIGGERS)
132 		return NULL;
133 
134 	return seq_list_next(t, &event_file->triggers, pos);
135 }
136 
137 static void *trigger_start(struct seq_file *m, loff_t *pos)
138 {
139 	struct trace_event_file *event_file;
140 
141 	/* ->stop() is called even if ->start() fails */
142 	mutex_lock(&event_mutex);
143 	event_file = event_file_data(m->private);
144 	if (unlikely(!event_file))
145 		return ERR_PTR(-ENODEV);
146 
147 	if (list_empty(&event_file->triggers))
148 		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
149 
150 	return seq_list_start(&event_file->triggers, *pos);
151 }
152 
153 static void trigger_stop(struct seq_file *m, void *t)
154 {
155 	mutex_unlock(&event_mutex);
156 }
157 
158 static int trigger_show(struct seq_file *m, void *v)
159 {
160 	struct event_trigger_data *data;
161 	struct event_command *p;
162 
163 	if (v == SHOW_AVAILABLE_TRIGGERS) {
164 		seq_puts(m, "# Available triggers:\n");
165 		seq_putc(m, '#');
166 		mutex_lock(&trigger_cmd_mutex);
167 		list_for_each_entry_reverse(p, &trigger_commands, list)
168 			seq_printf(m, " %s", p->name);
169 		seq_putc(m, '\n');
170 		mutex_unlock(&trigger_cmd_mutex);
171 		return 0;
172 	}
173 
174 	data = list_entry(v, struct event_trigger_data, list);
175 	data->ops->print(m, data->ops, data);
176 
177 	return 0;
178 }
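
/*
 * For illustration (output is a sketch; paths assume tracefs mounted at
 * /sys/kernel/tracing): reading an event's 'trigger' file with nothing
 * registered lists the available commands, while a registered trigger
 * is shown via its ->print() op:
 *
 *	# cat events/sched/sched_switch/trigger
 *	# Available triggers:
 *	# traceon traceoff snapshot stacktrace enable_event disable_event
 *
 *	# echo 'traceoff:3' > events/sched/sched_switch/trigger
 *	# cat events/sched/sched_switch/trigger
 *	traceoff:count=3
 *
 * (hist-related commands also appear when CONFIG_HIST_TRIGGERS is set.)
 */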
179 
180 static const struct seq_operations event_triggers_seq_ops = {
181 	.start = trigger_start,
182 	.next = trigger_next,
183 	.stop = trigger_stop,
184 	.show = trigger_show,
185 };
186 
187 static int event_trigger_regex_open(struct inode *inode, struct file *file)
188 {
189 	int ret = 0;
190 
191 	mutex_lock(&event_mutex);
192 
193 	if (unlikely(!event_file_data(file))) {
194 		mutex_unlock(&event_mutex);
195 		return -ENODEV;
196 	}
197 
198 	if ((file->f_mode & FMODE_WRITE) &&
199 	    (file->f_flags & O_TRUNC)) {
200 		struct trace_event_file *event_file;
201 		struct event_command *p;
202 
203 		event_file = event_file_data(file);
204 
205 		list_for_each_entry(p, &trigger_commands, list) {
206 			if (p->unreg_all)
207 				p->unreg_all(event_file);
208 		}
209 	}
210 
211 	if (file->f_mode & FMODE_READ) {
212 		ret = seq_open(file, &event_triggers_seq_ops);
213 		if (!ret) {
214 			struct seq_file *m = file->private_data;
215 			m->private = file;
216 		}
217 	}
218 
219 	mutex_unlock(&event_mutex);
220 
221 	return ret;
222 }
223 
224 static int trigger_process_regex(struct trace_event_file *file, char *buff)
225 {
226 	char *command, *next = buff;
227 	struct event_command *p;
228 	int ret = -EINVAL;
229 
230 	command = strsep(&next, ": \t");
231 	command = (command[0] != '!') ? command : command + 1;
232 
233 	mutex_lock(&trigger_cmd_mutex);
234 	list_for_each_entry(p, &trigger_commands, list) {
235 		if (strcmp(p->name, command) == 0) {
236 			ret = p->func(p, file, buff, command, next);
237 			goto out_unlock;
238 		}
239 	}
240  out_unlock:
241 	mutex_unlock(&trigger_cmd_mutex);
242 
243 	return ret;
244 }
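
/*
 * The strings handed to trigger_process_regex() are whole lines written
 * to an event's 'trigger' file.  A few illustrative commands (syntax as
 * described in Documentation/trace/events.rst; paths assume tracefs at
 * /sys/kernel/tracing):
 *
 *	echo 'traceoff' > events/sched/sched_switch/trigger
 *	echo 'stacktrace:5' > events/sched/sched_switch/trigger
 *	echo '!stacktrace:5' > events/sched/sched_switch/trigger
 *
 * Only the leading command name is parsed here; a leading '!' remains in
 * the string passed down, so that event_trigger_callback() can take the
 * ->unreg() path and remove an existing trigger.
 */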
245 
246 static ssize_t event_trigger_regex_write(struct file *file,
247 					 const char __user *ubuf,
248 					 size_t cnt, loff_t *ppos)
249 {
250 	struct trace_event_file *event_file;
251 	ssize_t ret;
252 	char *buf;
253 
254 	if (!cnt)
255 		return 0;
256 
257 	if (cnt >= PAGE_SIZE)
258 		return -EINVAL;
259 
260 	buf = memdup_user_nul(ubuf, cnt);
261 	if (IS_ERR(buf))
262 		return PTR_ERR(buf);
263 
264 	strim(buf);
265 
266 	mutex_lock(&event_mutex);
267 	event_file = event_file_data(file);
268 	if (unlikely(!event_file)) {
269 		mutex_unlock(&event_mutex);
270 		kfree(buf);
271 		return -ENODEV;
272 	}
273 	ret = trigger_process_regex(event_file, buf);
274 	mutex_unlock(&event_mutex);
275 
276 	kfree(buf);
277 	if (ret < 0)
278 		goto out;
279 
280 	*ppos += cnt;
281 	ret = cnt;
282  out:
283 	return ret;
284 }
285 
286 static int event_trigger_regex_release(struct inode *inode, struct file *file)
287 {
288 	mutex_lock(&event_mutex);
289 
290 	if (file->f_mode & FMODE_READ)
291 		seq_release(inode, file);
292 
293 	mutex_unlock(&event_mutex);
294 
295 	return 0;
296 }
297 
298 static ssize_t
299 event_trigger_write(struct file *filp, const char __user *ubuf,
300 		    size_t cnt, loff_t *ppos)
301 {
302 	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
303 }
304 
305 static int
306 event_trigger_open(struct inode *inode, struct file *filp)
307 {
308 	return event_trigger_regex_open(inode, filp);
309 }
310 
311 static int
312 event_trigger_release(struct inode *inode, struct file *file)
313 {
314 	return event_trigger_regex_release(inode, file);
315 }
316 
317 const struct file_operations event_trigger_fops = {
318 	.open = event_trigger_open,
319 	.read = seq_read,
320 	.write = event_trigger_write,
321 	.llseek = tracing_lseek,
322 	.release = event_trigger_release,
323 };
324 
325 /*
326  * Currently we only register event commands from __init, so mark this
327  * __init too.
328  */
329 __init int register_event_command(struct event_command *cmd)
330 {
331 	struct event_command *p;
332 	int ret = 0;
333 
334 	mutex_lock(&trigger_cmd_mutex);
335 	list_for_each_entry(p, &trigger_commands, list) {
336 		if (strcmp(cmd->name, p->name) == 0) {
337 			ret = -EBUSY;
338 			goto out_unlock;
339 		}
340 	}
341 	list_add(&cmd->list, &trigger_commands);
342  out_unlock:
343 	mutex_unlock(&trigger_cmd_mutex);
344 
345 	return ret;
346 }
347 
348 /*
349  * Currently we only unregister event commands from __init, so mark
350  * this __init too.
351  */
352 __init int unregister_event_command(struct event_command *cmd)
353 {
354 	struct event_command *p, *n;
355 	int ret = -ENODEV;
356 
357 	mutex_lock(&trigger_cmd_mutex);
358 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
359 		if (strcmp(cmd->name, p->name) == 0) {
360 			ret = 0;
361 			list_del_init(&p->list);
362 			goto out_unlock;
363 		}
364 	}
365  out_unlock:
366 	mutex_unlock(&trigger_cmd_mutex);
367 
368 	return ret;
369 }
370 
371 /**
372  * event_trigger_print - Generic event_trigger_ops @print implementation
373  * @name: The name of the event trigger
374  * @m: The seq_file being printed to
375  * @data: Trigger-specific data
376  * @filter_str: filter_str to print, if present
377  *
378  * Common implementation for event triggers to print themselves.
379  *
380  * Usually wrapped by a function that simply sets the @name of the
381  * trigger command and then invokes this.
382  *
383  * Return: 0 on success, errno otherwise
384  */
385 static int
386 event_trigger_print(const char *name, struct seq_file *m,
387 		    void *data, char *filter_str)
388 {
389 	long count = (long)data;
390 
391 	seq_puts(m, name);
392 
393 	if (count == -1)
394 		seq_puts(m, ":unlimited");
395 	else
396 		seq_printf(m, ":count=%ld", count);
397 
398 	if (filter_str)
399 		seq_printf(m, " if %s\n", filter_str);
400 	else
401 		seq_putc(m, '\n');
402 
403 	return 0;
404 }
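
/*
 * Illustrative output: with the above, reading a trigger file shows
 * lines such as
 *
 *	traceoff:unlimited
 *	stacktrace:count=5 if prev_prio < 100
 *
 * where ':unlimited' corresponds to a count of -1, i.e. a trigger
 * registered without an explicit ':N' count.
 */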
405 
406 /**
407  * event_trigger_init - Generic event_trigger_ops @init implementation
408  * @ops: The trigger ops associated with the trigger
409  * @data: Trigger-specific data
410  *
411  * Common implementation of event trigger initialization.
412  *
413  * Usually used directly as the @init method in event trigger
414  * implementations.
415  *
416  * Return: 0 on success, errno otherwise
417  */
418 int event_trigger_init(struct event_trigger_ops *ops,
419 		       struct event_trigger_data *data)
420 {
421 	data->ref++;
422 	return 0;
423 }
424 
425 /**
426  * event_trigger_free - Generic event_trigger_ops @free implementation
427  * @ops: The trigger ops associated with the trigger
428  * @data: Trigger-specific data
429  *
430  * Common implementation of event trigger de-initialization.
431  *
432  * Usually used directly as the @free method in event trigger
433  * implementations.
434  */
435 static void
436 event_trigger_free(struct event_trigger_ops *ops,
437 		   struct event_trigger_data *data)
438 {
439 	if (WARN_ON_ONCE(data->ref <= 0))
440 		return;
441 
442 	data->ref--;
443 	if (!data->ref)
444 		trigger_data_free(data);
445 }
446 
447 int trace_event_trigger_enable_disable(struct trace_event_file *file,
448 				       int trigger_enable)
449 {
450 	int ret = 0;
451 
452 	if (trigger_enable) {
453 		if (atomic_inc_return(&file->tm_ref) > 1)
454 			return ret;
455 		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
456 		ret = trace_event_enable_disable(file, 1, 1);
457 	} else {
458 		if (atomic_dec_return(&file->tm_ref) > 0)
459 			return ret;
460 		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
461 		ret = trace_event_enable_disable(file, 0, 1);
462 	}
463 
464 	return ret;
465 }
466 
467 /**
468  * clear_event_triggers - Clear all triggers associated with a trace array
469  * @tr: The trace array to clear
470  *
471  * For each trigger, the triggering event has its tm_ref decremented
472  * via trace_event_trigger_enable_disable(), and any associated event
473  * (in the case of enable/disable_event triggers) will have its sm_ref
474  * decremented via free()->trace_event_enable_disable().  That
475  * combination effectively reverses the soft-mode/trigger state added
476  * by trigger registration.
477  *
478  * Must be called with event_mutex held.
479  */
480 void
481 clear_event_triggers(struct trace_array *tr)
482 {
483 	struct trace_event_file *file;
484 
485 	list_for_each_entry(file, &tr->events, list) {
486 		struct event_trigger_data *data;
487 		list_for_each_entry_rcu(data, &file->triggers, list) {
488 			trace_event_trigger_enable_disable(file, 0);
489 			if (data->ops->free)
490 				data->ops->free(data->ops, data);
491 		}
492 	}
493 }
494 
495 /**
496  * update_cond_flag - Set or reset the TRIGGER_COND bit
497  * @file: The trace_event_file associated with the event
498  *
499  * If an event has triggers and any of those triggers has a filter or
500  * a post_trigger, trigger invocation needs to be deferred until after
501  * the current event has logged its data, and the event should have
502  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
503  * cleared.
504  */
505 void update_cond_flag(struct trace_event_file *file)
506 {
507 	struct event_trigger_data *data;
508 	bool set_cond = false;
509 
510 	list_for_each_entry_rcu(data, &file->triggers, list) {
511 		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
512 		    event_command_needs_rec(data->cmd_ops)) {
513 			set_cond = true;
514 			break;
515 		}
516 	}
517 
518 	if (set_cond)
519 		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
520 	else
521 		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
522 }
523 
524 /**
525  * register_trigger - Generic event_command @reg implementation
526  * @glob: The raw string used to register the trigger
527  * @ops: The trigger ops associated with the trigger
528  * @data: Trigger-specific data to associate with the trigger
529  * @file: The trace_event_file associated with the event
530  *
531  * Common implementation for event trigger registration.
532  *
533  * Usually used directly as the @reg method in event command
534  * implementations.
535  *
536  * Return: 0 on success, errno otherwise
537  */
538 static int register_trigger(char *glob, struct event_trigger_ops *ops,
539 			    struct event_trigger_data *data,
540 			    struct trace_event_file *file)
541 {
542 	struct event_trigger_data *test;
543 	int ret = 0;
544 
545 	list_for_each_entry_rcu(test, &file->triggers, list) {
546 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
547 			ret = -EEXIST;
548 			goto out;
549 		}
550 	}
551 
552 	if (data->ops->init) {
553 		ret = data->ops->init(data->ops, data);
554 		if (ret < 0)
555 			goto out;
556 	}
557 
558 	list_add_rcu(&data->list, &file->triggers);
559 	ret++;
560 
561 	update_cond_flag(file);
562 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
563 		list_del_rcu(&data->list);
564 		update_cond_flag(file);
565 		ret--;
566 	}
567 out:
568 	return ret;
569 }
570 
571 /**
572  * unregister_trigger - Generic event_command @unreg implementation
573  * @glob: The raw string used to register the trigger
574  * @ops: The trigger ops associated with the trigger
575  * @test: Trigger-specific data used to find the trigger to remove
576  * @file: The trace_event_file associated with the event
577  *
578  * Common implementation for event trigger unregistration.
579  *
580  * Usually used directly as the @unreg method in event command
581  * implementations.
582  */
583 void unregister_trigger(char *glob, struct event_trigger_ops *ops,
584 			struct event_trigger_data *test,
585 			struct trace_event_file *file)
586 {
587 	struct event_trigger_data *data;
588 	bool unregistered = false;
589 
590 	list_for_each_entry_rcu(data, &file->triggers, list) {
591 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
592 			unregistered = true;
593 			list_del_rcu(&data->list);
594 			trace_event_trigger_enable_disable(file, 0);
595 			update_cond_flag(file);
596 			break;
597 		}
598 	}
599 
600 	if (unregistered && data->ops->free)
601 		data->ops->free(data->ops, data);
602 }
603 
604 /**
605  * event_trigger_callback - Generic event_command @func implementation
606  * @cmd_ops: The command ops, used for trigger registration
607  * @file: The trace_event_file associated with the event
608  * @glob: The raw string used to register the trigger
609  * @cmd: The cmd portion of the string used to register the trigger
610  * @param: The params portion of the string used to register the trigger
611  *
612  * Common implementation for event command parsing and trigger
613  * instantiation.
614  *
615  * Usually used directly as the @func method in event command
616  * implementations.
617  *
618  * Return: 0 on success, errno otherwise
619  */
620 static int
621 event_trigger_callback(struct event_command *cmd_ops,
622 		       struct trace_event_file *file,
623 		       char *glob, char *cmd, char *param)
624 {
625 	struct event_trigger_data *trigger_data;
626 	struct event_trigger_ops *trigger_ops;
627 	char *trigger = NULL;
628 	char *number;
629 	int ret;
630 
631 	/* separate the trigger from the filter (t:n [if filter]) */
632 	if (param && isdigit(param[0]))
633 		trigger = strsep(&param, " \t");
634 
635 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
636 
637 	ret = -ENOMEM;
638 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
639 	if (!trigger_data)
640 		goto out;
641 
642 	trigger_data->count = -1;
643 	trigger_data->ops = trigger_ops;
644 	trigger_data->cmd_ops = cmd_ops;
645 	INIT_LIST_HEAD(&trigger_data->list);
646 	INIT_LIST_HEAD(&trigger_data->named_list);
647 
648 	if (glob[0] == '!') {
649 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
650 		kfree(trigger_data);
651 		ret = 0;
652 		goto out;
653 	}
654 
655 	if (trigger) {
656 		number = strsep(&trigger, ":");
657 
658 		ret = -EINVAL;
659 		if (!strlen(number))
660 			goto out_free;
661 
662 		/*
663 		 * We use the callback data field (which is a pointer)
664 		 * as our counter.
665 		 */
666 		ret = kstrtoul(number, 0, &trigger_data->count);
667 		if (ret)
668 			goto out_free;
669 	}
670 
671 	if (!param) /* if param is non-empty, it's supposed to be a filter */
672 		goto out_reg;
673 
674 	if (!cmd_ops->set_filter)
675 		goto out_reg;
676 
677 	ret = cmd_ops->set_filter(param, trigger_data, file);
678 	if (ret < 0)
679 		goto out_free;
680 
681  out_reg:
682 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
683 	/*
684 	 * The above returns on success the # of functions enabled,
685 	 * but if it didn't find any functions it returns zero.
686 	 * Consider no functions a failure too.
687 	 */
688 	if (!ret) {
689 		ret = -ENOENT;
690 		goto out_free;
691 	} else if (ret < 0)
692 		goto out_free;
693 	ret = 0;
694  out:
695 	return ret;
696 
697  out_free:
698 	if (cmd_ops->set_filter)
699 		cmd_ops->set_filter(NULL, trigger_data, NULL);
700 	kfree(trigger_data);
701 	goto out;
702 }
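
/*
 * Illustrative breakdown of the arguments for a write of
 * 'stacktrace:5 if prev_prio < 100' (prev_prio being a field of the
 * chosen example event):
 *
 *	glob  = "stacktrace"	("!stacktrace" when removing a trigger)
 *	cmd   = "stacktrace"
 *	param = "5 if prev_prio < 100"
 *
 * The leading '5' is split off as the trigger count and the remaining
 * 'if prev_prio < 100' is handed to ->set_filter().
 */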
703 
704 /**
705  * set_trigger_filter - Generic event_command @set_filter implementation
706  * @filter_str: The filter string for the trigger, NULL to remove filter
707  * @trigger_data: Trigger-specific data
708  * @file: The trace_event_file associated with the event
709  *
710  * Common implementation for event command filter parsing and filter
711  * instantiation.
712  *
713  * Usually used directly as the @set_filter method in event command
714  * implementations.
715  *
716  * Also used to remove a filter (if filter_str = NULL).
717  *
718  * Return: 0 on success, errno otherwise
719  */
720 int set_trigger_filter(char *filter_str,
721 		       struct event_trigger_data *trigger_data,
722 		       struct trace_event_file *file)
723 {
724 	struct event_trigger_data *data = trigger_data;
725 	struct event_filter *filter = NULL, *tmp;
726 	int ret = -EINVAL;
727 	char *s;
728 
729 	if (!filter_str) /* clear the current filter */
730 		goto assign;
731 
732 	s = strsep(&filter_str, " \t");
733 
734 	if (!strlen(s) || strcmp(s, "if") != 0)
735 		goto out;
736 
737 	if (!filter_str)
738 		goto out;
739 
740 	/* The filter is for the 'trigger' event, not the triggered event */
741 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
742 	if (ret)
743 		goto out;
744  assign:
745 	tmp = rcu_access_pointer(data->filter);
746 
747 	rcu_assign_pointer(data->filter, filter);
748 
749 	if (tmp) {
750 		/* Make sure the call is done with the filter */
751 		synchronize_sched();
752 		free_event_filter(tmp);
753 	}
754 
755 	kfree(data->filter_str);
756 	data->filter_str = NULL;
757 
758 	if (filter_str) {
759 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
760 		if (!data->filter_str) {
761 			free_event_filter(rcu_access_pointer(data->filter));
762 			data->filter = NULL;
763 			ret = -ENOMEM;
764 		}
765 	}
766  out:
767 	return ret;
768 }
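
/*
 * Illustrative filter usage: the text following the count (if any) must
 * start with the literal word 'if'; the rest is compiled against the
 * fields of the event the trigger is attached to, e.g.
 *
 *	echo 'traceoff if prev_prio < 100' > events/sched/sched_switch/trigger
 *	echo 'snapshot:1 if comm == "bash"' > events/sched/sched_wakeup/trigger
 *
 * Calling set_trigger_filter() with a NULL filter_str removes any
 * previously set filter.
 */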
769 
770 static LIST_HEAD(named_triggers);
771 
772 /**
773  * find_named_trigger - Find the common named trigger associated with @name
774  * @name: The name of the set of named triggers to find the common data for
775  *
776  * Named triggers are sets of triggers that share a common set of
777  * trigger data.  The first named trigger registered with a given name
778  * owns the common trigger data that the others subsequently
779  * registered with the same name will reference.  This function
780  * returns the common trigger data associated with that first
781  * registered instance.
782  *
783  * Return: the common trigger data for the given named trigger on
784  * success, NULL otherwise.
785  */
786 struct event_trigger_data *find_named_trigger(const char *name)
787 {
788 	struct event_trigger_data *data;
789 
790 	if (!name)
791 		return NULL;
792 
793 	list_for_each_entry(data, &named_triggers, named_list) {
794 		if (data->named_data)
795 			continue;
796 		if (strcmp(data->name, name) == 0)
797 			return data;
798 	}
799 
800 	return NULL;
801 }
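
/*
 * Illustrative use of named triggers (currently the hist triggers):
 * registering two hist triggers with the same 'name=' makes them share
 * one set of trigger data, so both events update the same histogram:
 *
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_waking/trigger
 *	echo 'hist:name=foo:keys=common_pid' > events/sched/sched_wakeup/trigger
 *
 * After the first write, find_named_trigger("foo") returns the data
 * owned by that first registration.
 */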
802 
803 /**
804  * is_named_trigger - determine if a given trigger is a named trigger
805  * @test: The trigger data to test
806  *
807  * Return: true if 'test' is a named trigger, false otherwise.
808  */
809 bool is_named_trigger(struct event_trigger_data *test)
810 {
811 	struct event_trigger_data *data;
812 
813 	list_for_each_entry(data, &named_triggers, named_list) {
814 		if (test == data)
815 			return true;
816 	}
817 
818 	return false;
819 }
820 
821 /**
822  * save_named_trigger - save the trigger in the named trigger list
823  * @name: The name of the named trigger set
824  * @data: The trigger data to save
825  *
826  * Return: 0 if successful, negative error otherwise.
827  */
828 int save_named_trigger(const char *name, struct event_trigger_data *data)
829 {
830 	data->name = kstrdup(name, GFP_KERNEL);
831 	if (!data->name)
832 		return -ENOMEM;
833 
834 	list_add(&data->named_list, &named_triggers);
835 
836 	return 0;
837 }
838 
839 /**
840  * del_named_trigger - delete a trigger from the named trigger list
841  * @data: The trigger data to delete
842  */
843 void del_named_trigger(struct event_trigger_data *data)
844 {
845 	kfree(data->name);
846 	data->name = NULL;
847 
848 	list_del(&data->named_list);
849 }
850 
851 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
852 {
853 	struct event_trigger_data *test;
854 
855 	list_for_each_entry(test, &named_triggers, named_list) {
856 		if (strcmp(test->name, data->name) == 0) {
857 			if (pause) {
858 				test->paused_tmp = test->paused;
859 				test->paused = true;
860 			} else {
861 				test->paused = test->paused_tmp;
862 			}
863 		}
864 	}
865 }
866 
867 /**
868  * pause_named_trigger - Pause all named triggers with the same name
869  * @data: The trigger data of a named trigger to pause
870  *
871  * Pauses a named trigger along with all other triggers having the
872  * same name.  Because named triggers share a common set of data,
873  * pausing only one is meaningless, so pausing one named trigger needs
874  * to pause all triggers with the same name.
875  */
876 void pause_named_trigger(struct event_trigger_data *data)
877 {
878 	__pause_named_trigger(data, true);
879 }
880 
881 /**
882  * unpause_named_trigger - Un-pause all named triggers with the same name
883  * @data: The trigger data of a named trigger to unpause
884  *
885  * Un-pauses a named trigger along with all other triggers having the
886  * same name.  Because named triggers share a common set of data,
887  * unpausing only one is meaningless, so unpausing one named trigger
888  * needs to unpause all triggers with the same name.
889  */
890 void unpause_named_trigger(struct event_trigger_data *data)
891 {
892 	__pause_named_trigger(data, false);
893 }
894 
895 /**
896  * set_named_trigger_data - Associate common named trigger data
897  * @data: The trigger data to associate with the common named trigger data
 * @named_data: The common trigger data owned by the first trigger registered
 *              with the given name
898  *
899  * Named triggers are sets of triggers that share a common set of
900  * trigger data.  The first named trigger registered with a given name
901  * owns the common trigger data that the others subsequently
902  * registered with the same name will reference.  This function
903  * associates the common trigger data from the first trigger with the
904  * given trigger.
905  */
906 void set_named_trigger_data(struct event_trigger_data *data,
907 			    struct event_trigger_data *named_data)
908 {
909 	data->named_data = named_data;
910 }
911 
912 struct event_trigger_data *
913 get_named_trigger_data(struct event_trigger_data *data)
914 {
915 	return data->named_data;
916 }
917 
918 static void
919 traceon_trigger(struct event_trigger_data *data, void *rec,
920 		struct ring_buffer_event *event)
921 {
922 	if (tracing_is_on())
923 		return;
924 
925 	tracing_on();
926 }
927 
928 static void
929 traceon_count_trigger(struct event_trigger_data *data, void *rec,
930 		      struct ring_buffer_event *event)
931 {
932 	if (tracing_is_on())
933 		return;
934 
935 	if (!data->count)
936 		return;
937 
938 	if (data->count != -1)
939 		(data->count)--;
940 
941 	tracing_on();
942 }
943 
944 static void
945 traceoff_trigger(struct event_trigger_data *data, void *rec,
946 		 struct ring_buffer_event *event)
947 {
948 	if (!tracing_is_on())
949 		return;
950 
951 	tracing_off();
952 }
953 
954 static void
955 traceoff_count_trigger(struct event_trigger_data *data, void *rec,
956 		       struct ring_buffer_event *event)
957 {
958 	if (!tracing_is_on())
959 		return;
960 
961 	if (!data->count)
962 		return;
963 
964 	if (data->count != -1)
965 		(data->count)--;
966 
967 	tracing_off();
968 }
969 
970 static int
971 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
972 		      struct event_trigger_data *data)
973 {
974 	return event_trigger_print("traceon", m, (void *)data->count,
975 				   data->filter_str);
976 }
977 
978 static int
979 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
980 		       struct event_trigger_data *data)
981 {
982 	return event_trigger_print("traceoff", m, (void *)data->count,
983 				   data->filter_str);
984 }
985 
986 static struct event_trigger_ops traceon_trigger_ops = {
987 	.func			= traceon_trigger,
988 	.print			= traceon_trigger_print,
989 	.init			= event_trigger_init,
990 	.free			= event_trigger_free,
991 };
992 
993 static struct event_trigger_ops traceon_count_trigger_ops = {
994 	.func			= traceon_count_trigger,
995 	.print			= traceon_trigger_print,
996 	.init			= event_trigger_init,
997 	.free			= event_trigger_free,
998 };
999 
1000 static struct event_trigger_ops traceoff_trigger_ops = {
1001 	.func			= traceoff_trigger,
1002 	.print			= traceoff_trigger_print,
1003 	.init			= event_trigger_init,
1004 	.free			= event_trigger_free,
1005 };
1006 
1007 static struct event_trigger_ops traceoff_count_trigger_ops = {
1008 	.func			= traceoff_count_trigger,
1009 	.print			= traceoff_trigger_print,
1010 	.init			= event_trigger_init,
1011 	.free			= event_trigger_free,
1012 };
1013 
1014 static struct event_trigger_ops *
1015 onoff_get_trigger_ops(char *cmd, char *param)
1016 {
1017 	struct event_trigger_ops *ops;
1018 
1019 	/* we register both traceon and traceoff to this callback */
1020 	if (strcmp(cmd, "traceon") == 0)
1021 		ops = param ? &traceon_count_trigger_ops :
1022 			&traceon_trigger_ops;
1023 	else
1024 		ops = param ? &traceoff_count_trigger_ops :
1025 			&traceoff_trigger_ops;
1026 
1027 	return ops;
1028 }
1029 
1030 static struct event_command trigger_traceon_cmd = {
1031 	.name			= "traceon",
1032 	.trigger_type		= ETT_TRACE_ONOFF,
1033 	.func			= event_trigger_callback,
1034 	.reg			= register_trigger,
1035 	.unreg			= unregister_trigger,
1036 	.get_trigger_ops	= onoff_get_trigger_ops,
1037 	.set_filter		= set_trigger_filter,
1038 };
1039 
1040 static struct event_command trigger_traceoff_cmd = {
1041 	.name			= "traceoff",
1042 	.trigger_type		= ETT_TRACE_ONOFF,
1043 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1044 	.func			= event_trigger_callback,
1045 	.reg			= register_trigger,
1046 	.unreg			= unregister_trigger,
1047 	.get_trigger_ops	= onoff_get_trigger_ops,
1048 	.set_filter		= set_trigger_filter,
1049 };
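
/*
 * Illustrative traceon/traceoff trigger usage (paths assume tracefs at
 * /sys/kernel/tracing): stop tracing the first time an event fires, or
 * re-enable tracing a bounded number of times:
 *
 *	echo 'traceoff' > events/oom/oom_score_adj_update/trigger
 *	echo 'traceon:5' > events/sched/sched_process_exec/trigger
 *
 * traceoff is registered with EVENT_CMD_FL_POST_TRIGGER, so the
 * triggering event itself is still written before tracing stops.
 */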
1050 
1051 #ifdef CONFIG_TRACER_SNAPSHOT
1052 static void
1053 snapshot_trigger(struct event_trigger_data *data, void *rec,
1054 		 struct ring_buffer_event *event)
1055 {
1056 	tracing_snapshot();
1057 }
1058 
1059 static void
1060 snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1061 		       struct ring_buffer_event *event)
1062 {
1063 	if (!data->count)
1064 		return;
1065 
1066 	if (data->count != -1)
1067 		(data->count)--;
1068 
1069 	snapshot_trigger(data, rec, event);
1070 }
1071 
1072 static int
1073 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1074 			  struct event_trigger_data *data,
1075 			  struct trace_event_file *file)
1076 {
1077 	int ret = register_trigger(glob, ops, data, file);
1078 
1079 	if (ret > 0 && tracing_alloc_snapshot() != 0) {
1080 		unregister_trigger(glob, ops, data, file);
1081 		ret = 0;
1082 	}
1083 
1084 	return ret;
1085 }
1086 
1087 static int
1088 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1089 		       struct event_trigger_data *data)
1090 {
1091 	return event_trigger_print("snapshot", m, (void *)data->count,
1092 				   data->filter_str);
1093 }
1094 
1095 static struct event_trigger_ops snapshot_trigger_ops = {
1096 	.func			= snapshot_trigger,
1097 	.print			= snapshot_trigger_print,
1098 	.init			= event_trigger_init,
1099 	.free			= event_trigger_free,
1100 };
1101 
1102 static struct event_trigger_ops snapshot_count_trigger_ops = {
1103 	.func			= snapshot_count_trigger,
1104 	.print			= snapshot_trigger_print,
1105 	.init			= event_trigger_init,
1106 	.free			= event_trigger_free,
1107 };
1108 
1109 static struct event_trigger_ops *
1110 snapshot_get_trigger_ops(char *cmd, char *param)
1111 {
1112 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1113 }
1114 
1115 static struct event_command trigger_snapshot_cmd = {
1116 	.name			= "snapshot",
1117 	.trigger_type		= ETT_SNAPSHOT,
1118 	.func			= event_trigger_callback,
1119 	.reg			= register_snapshot_trigger,
1120 	.unreg			= unregister_trigger,
1121 	.get_trigger_ops	= snapshot_get_trigger_ops,
1122 	.set_filter		= set_trigger_filter,
1123 };
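
/*
 * Illustrative snapshot trigger usage: capture the ring buffer into the
 * snapshot buffer the first time the event fires (the buffer itself is
 * allocated at registration time by register_snapshot_trigger() above):
 *
 *	echo 'snapshot:1' > events/block/block_unplug/trigger
 *	cat snapshot
 */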
1124 
1125 static __init int register_trigger_snapshot_cmd(void)
1126 {
1127 	int ret;
1128 
1129 	ret = register_event_command(&trigger_snapshot_cmd);
1130 	WARN_ON(ret < 0);
1131 
1132 	return ret;
1133 }
1134 #else
1135 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1136 #endif /* CONFIG_TRACER_SNAPSHOT */
1137 
1138 #ifdef CONFIG_STACKTRACE
1139 #ifdef CONFIG_UNWINDER_ORC
1140 /* Skip 2:
1141  *   event_triggers_post_call()
1142  *   trace_event_raw_event_xxx()
1143  */
1144 # define STACK_SKIP 2
1145 #else
1146 /*
1147  * Skip 4:
1148  *   stacktrace_trigger()
1149  *   event_triggers_post_call()
1150  *   trace_event_buffer_commit()
1151  *   trace_event_raw_event_xxx()
1152  */
1153 #define STACK_SKIP 4
1154 #endif
1155 
1156 static void
1157 stacktrace_trigger(struct event_trigger_data *data, void *rec,
1158 		   struct ring_buffer_event *event)
1159 {
1160 	trace_dump_stack(STACK_SKIP);
1161 }
1162 
1163 static void
1164 stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1165 			 struct ring_buffer_event *event)
1166 {
1167 	if (!data->count)
1168 		return;
1169 
1170 	if (data->count != -1)
1171 		(data->count)--;
1172 
1173 	stacktrace_trigger(data, rec, event);
1174 }
1175 
1176 static int
1177 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1178 			 struct event_trigger_data *data)
1179 {
1180 	return event_trigger_print("stacktrace", m, (void *)data->count,
1181 				   data->filter_str);
1182 }
1183 
1184 static struct event_trigger_ops stacktrace_trigger_ops = {
1185 	.func			= stacktrace_trigger,
1186 	.print			= stacktrace_trigger_print,
1187 	.init			= event_trigger_init,
1188 	.free			= event_trigger_free,
1189 };
1190 
1191 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1192 	.func			= stacktrace_count_trigger,
1193 	.print			= stacktrace_trigger_print,
1194 	.init			= event_trigger_init,
1195 	.free			= event_trigger_free,
1196 };
1197 
1198 static struct event_trigger_ops *
1199 stacktrace_get_trigger_ops(char *cmd, char *param)
1200 {
1201 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1202 }
1203 
1204 static struct event_command trigger_stacktrace_cmd = {
1205 	.name			= "stacktrace",
1206 	.trigger_type		= ETT_STACKTRACE,
1207 	.flags			= EVENT_CMD_FL_POST_TRIGGER,
1208 	.func			= event_trigger_callback,
1209 	.reg			= register_trigger,
1210 	.unreg			= unregister_trigger,
1211 	.get_trigger_ops	= stacktrace_get_trigger_ops,
1212 	.set_filter		= set_trigger_filter,
1213 };
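
/*
 * Illustrative stacktrace trigger usage: record a kernel stack trace in
 * the trace buffer each time the event fires, optionally limited and
 * filtered (example field per the kmem:kmalloc event):
 *
 *	echo 'stacktrace' > events/timer/hrtimer_start/trigger
 *	echo 'stacktrace:5 if bytes_req >= 65536' > events/kmem/kmalloc/trigger
 */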
1214 
1215 static __init int register_trigger_stacktrace_cmd(void)
1216 {
1217 	int ret;
1218 
1219 	ret = register_event_command(&trigger_stacktrace_cmd);
1220 	WARN_ON(ret < 0);
1221 
1222 	return ret;
1223 }
1224 #else
1225 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1226 #endif /* CONFIG_STACKTRACE */
1227 
1228 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1229 {
1230 	unregister_event_command(&trigger_traceon_cmd);
1231 	unregister_event_command(&trigger_traceoff_cmd);
1232 }
1233 
1234 static void
1235 event_enable_trigger(struct event_trigger_data *data, void *rec,
1236 		     struct ring_buffer_event *event)
1237 {
1238 	struct enable_trigger_data *enable_data = data->private_data;
1239 
1240 	if (enable_data->enable)
1241 		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1242 	else
1243 		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1244 }
1245 
1246 static void
1247 event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1248 			   struct ring_buffer_event *event)
1249 {
1250 	struct enable_trigger_data *enable_data = data->private_data;
1251 
1252 	if (!data->count)
1253 		return;
1254 
1255 	/* Skip if the event is in a state we want to switch to */
1256 	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1257 		return;
1258 
1259 	if (data->count != -1)
1260 		(data->count)--;
1261 
1262 	event_enable_trigger(data, rec, event);
1263 }
1264 
1265 int event_enable_trigger_print(struct seq_file *m,
1266 			       struct event_trigger_ops *ops,
1267 			       struct event_trigger_data *data)
1268 {
1269 	struct enable_trigger_data *enable_data = data->private_data;
1270 
1271 	seq_printf(m, "%s:%s:%s",
1272 		   enable_data->hist ?
1273 		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1274 		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1275 		   enable_data->file->event_call->class->system,
1276 		   trace_event_name(enable_data->file->event_call));
1277 
1278 	if (data->count == -1)
1279 		seq_puts(m, ":unlimited");
1280 	else
1281 		seq_printf(m, ":count=%ld", data->count);
1282 
1283 	if (data->filter_str)
1284 		seq_printf(m, " if %s\n", data->filter_str);
1285 	else
1286 		seq_putc(m, '\n');
1287 
1288 	return 0;
1289 }
1290 
1291 void event_enable_trigger_free(struct event_trigger_ops *ops,
1292 			       struct event_trigger_data *data)
1293 {
1294 	struct enable_trigger_data *enable_data = data->private_data;
1295 
1296 	if (WARN_ON_ONCE(data->ref <= 0))
1297 		return;
1298 
1299 	data->ref--;
1300 	if (!data->ref) {
1301 		/* Remove the SOFT_MODE flag */
1302 		trace_event_enable_disable(enable_data->file, 0, 1);
1303 		module_put(enable_data->file->event_call->mod);
1304 		trigger_data_free(data);
1305 		kfree(enable_data);
1306 	}
1307 }
1308 
1309 static struct event_trigger_ops event_enable_trigger_ops = {
1310 	.func			= event_enable_trigger,
1311 	.print			= event_enable_trigger_print,
1312 	.init			= event_trigger_init,
1313 	.free			= event_enable_trigger_free,
1314 };
1315 
1316 static struct event_trigger_ops event_enable_count_trigger_ops = {
1317 	.func			= event_enable_count_trigger,
1318 	.print			= event_enable_trigger_print,
1319 	.init			= event_trigger_init,
1320 	.free			= event_enable_trigger_free,
1321 };
1322 
1323 static struct event_trigger_ops event_disable_trigger_ops = {
1324 	.func			= event_enable_trigger,
1325 	.print			= event_enable_trigger_print,
1326 	.init			= event_trigger_init,
1327 	.free			= event_enable_trigger_free,
1328 };
1329 
1330 static struct event_trigger_ops event_disable_count_trigger_ops = {
1331 	.func			= event_enable_count_trigger,
1332 	.print			= event_enable_trigger_print,
1333 	.init			= event_trigger_init,
1334 	.free			= event_enable_trigger_free,
1335 };
1336 
1337 int event_enable_trigger_func(struct event_command *cmd_ops,
1338 			      struct trace_event_file *file,
1339 			      char *glob, char *cmd, char *param)
1340 {
1341 	struct trace_event_file *event_enable_file;
1342 	struct enable_trigger_data *enable_data;
1343 	struct event_trigger_data *trigger_data;
1344 	struct event_trigger_ops *trigger_ops;
1345 	struct trace_array *tr = file->tr;
1346 	const char *system;
1347 	const char *event;
1348 	bool hist = false;
1349 	char *trigger;
1350 	char *number;
1351 	bool enable;
1352 	int ret;
1353 
1354 	if (!param)
1355 		return -EINVAL;
1356 
1357 	/* separate the trigger from the filter (s:e:n [if filter]) */
1358 	trigger = strsep(&param, " \t");
1359 	if (!trigger)
1360 		return -EINVAL;
1361 
1362 	system = strsep(&trigger, ":");
1363 	if (!trigger)
1364 		return -EINVAL;
1365 
1366 	event = strsep(&trigger, ":");
1367 
1368 	ret = -EINVAL;
1369 	event_enable_file = find_event_file(tr, system, event);
1370 	if (!event_enable_file)
1371 		goto out;
1372 
1373 #ifdef CONFIG_HIST_TRIGGERS
1374 	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1375 		(strcmp(cmd, DISABLE_HIST_STR) == 0));
1376 
1377 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1378 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1379 #else
1380 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1381 #endif
1382 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1383 
1384 	ret = -ENOMEM;
1385 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1386 	if (!trigger_data)
1387 		goto out;
1388 
1389 	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1390 	if (!enable_data) {
1391 		kfree(trigger_data);
1392 		goto out;
1393 	}
1394 
1395 	trigger_data->count = -1;
1396 	trigger_data->ops = trigger_ops;
1397 	trigger_data->cmd_ops = cmd_ops;
1398 	INIT_LIST_HEAD(&trigger_data->list);
1399 	RCU_INIT_POINTER(trigger_data->filter, NULL);
1400 
1401 	enable_data->hist = hist;
1402 	enable_data->enable = enable;
1403 	enable_data->file = event_enable_file;
1404 	trigger_data->private_data = enable_data;
1405 
1406 	if (glob[0] == '!') {
1407 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1408 		kfree(trigger_data);
1409 		kfree(enable_data);
1410 		ret = 0;
1411 		goto out;
1412 	}
1413 
1414 	if (trigger) {
1415 		number = strsep(&trigger, ":");
1416 
1417 		ret = -EINVAL;
1418 		if (!strlen(number))
1419 			goto out_free;
1420 
1421 		/*
1422 		 * We use the callback data field (which is a pointer)
1423 		 * as our counter.
1424 		 */
1425 		ret = kstrtoul(number, 0, &trigger_data->count);
1426 		if (ret)
1427 			goto out_free;
1428 	}
1429 
1430 	if (!param) /* if param is non-empty, it's supposed to be a filter */
1431 		goto out_reg;
1432 
1433 	if (!cmd_ops->set_filter)
1434 		goto out_reg;
1435 
1436 	ret = cmd_ops->set_filter(param, trigger_data, file);
1437 	if (ret < 0)
1438 		goto out_free;
1439 
1440  out_reg:
1441 	/* Don't let event modules unload while probe registered */
1442 	ret = try_module_get(event_enable_file->event_call->mod);
1443 	if (!ret) {
1444 		ret = -EBUSY;
1445 		goto out_free;
1446 	}
1447 
1448 	ret = trace_event_enable_disable(event_enable_file, 1, 1);
1449 	if (ret < 0)
1450 		goto out_put;
1451 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1452 	/*
1453 	 * The above returns on success the # of functions enabled,
1454 	 * but if it didn't find any functions it returns zero.
1455 	 * Consider no functions a failure too.
1456 	 */
1457 	if (!ret) {
1458 		ret = -ENOENT;
1459 		goto out_disable;
1460 	} else if (ret < 0)
1461 		goto out_disable;
1462 	/* Just return zero, not the number of enabled functions */
1463 	ret = 0;
1464  out:
1465 	return ret;
1466 
1467  out_disable:
1468 	trace_event_enable_disable(event_enable_file, 0, 1);
1469  out_put:
1470 	module_put(event_enable_file->event_call->mod);
1471  out_free:
1472 	if (cmd_ops->set_filter)
1473 		cmd_ops->set_filter(NULL, trigger_data, NULL);
1474 	kfree(trigger_data);
1475 	kfree(enable_data);
1476 	goto out;
1477 }
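
/*
 * Illustrative enable_event/disable_event usage: the parameter names the
 * target event as 'system:event[:count]', so e.g. soft-enable
 * kmem:kmalloc for one hit when a read syscall is entered and disable it
 * again on exit:
 *
 *	echo 'enable_event:kmem:kmalloc:1' > events/syscalls/sys_enter_read/trigger
 *	echo 'disable_event:kmem:kmalloc' > events/syscalls/sys_exit_read/trigger
 */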
1478 
1479 int event_enable_register_trigger(char *glob,
1480 				  struct event_trigger_ops *ops,
1481 				  struct event_trigger_data *data,
1482 				  struct trace_event_file *file)
1483 {
1484 	struct enable_trigger_data *enable_data = data->private_data;
1485 	struct enable_trigger_data *test_enable_data;
1486 	struct event_trigger_data *test;
1487 	int ret = 0;
1488 
1489 	list_for_each_entry_rcu(test, &file->triggers, list) {
1490 		test_enable_data = test->private_data;
1491 		if (test_enable_data &&
1492 		    (test->cmd_ops->trigger_type ==
1493 		     data->cmd_ops->trigger_type) &&
1494 		    (test_enable_data->file == enable_data->file)) {
1495 			ret = -EEXIST;
1496 			goto out;
1497 		}
1498 	}
1499 
1500 	if (data->ops->init) {
1501 		ret = data->ops->init(data->ops, data);
1502 		if (ret < 0)
1503 			goto out;
1504 	}
1505 
1506 	list_add_rcu(&data->list, &file->triggers);
1507 	ret++;
1508 
1509 	update_cond_flag(file);
1510 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
1511 		list_del_rcu(&data->list);
1512 		update_cond_flag(file);
1513 		ret--;
1514 	}
1515 out:
1516 	return ret;
1517 }
1518 
1519 void event_enable_unregister_trigger(char *glob,
1520 				     struct event_trigger_ops *ops,
1521 				     struct event_trigger_data *test,
1522 				     struct trace_event_file *file)
1523 {
1524 	struct enable_trigger_data *test_enable_data = test->private_data;
1525 	struct enable_trigger_data *enable_data;
1526 	struct event_trigger_data *data;
1527 	bool unregistered = false;
1528 
1529 	list_for_each_entry_rcu(data, &file->triggers, list) {
1530 		enable_data = data->private_data;
1531 		if (enable_data &&
1532 		    (data->cmd_ops->trigger_type ==
1533 		     test->cmd_ops->trigger_type) &&
1534 		    (enable_data->file == test_enable_data->file)) {
1535 			unregistered = true;
1536 			list_del_rcu(&data->list);
1537 			trace_event_trigger_enable_disable(file, 0);
1538 			update_cond_flag(file);
1539 			break;
1540 		}
1541 	}
1542 
1543 	if (unregistered && data->ops->free)
1544 		data->ops->free(data->ops, data);
1545 }
1546 
1547 static struct event_trigger_ops *
1548 event_enable_get_trigger_ops(char *cmd, char *param)
1549 {
1550 	struct event_trigger_ops *ops;
1551 	bool enable;
1552 
1553 #ifdef CONFIG_HIST_TRIGGERS
1554 	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1555 		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
1556 #else
1557 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1558 #endif
1559 	if (enable)
1560 		ops = param ? &event_enable_count_trigger_ops :
1561 			&event_enable_trigger_ops;
1562 	else
1563 		ops = param ? &event_disable_count_trigger_ops :
1564 			&event_disable_trigger_ops;
1565 
1566 	return ops;
1567 }
1568 
1569 static struct event_command trigger_enable_cmd = {
1570 	.name			= ENABLE_EVENT_STR,
1571 	.trigger_type		= ETT_EVENT_ENABLE,
1572 	.func			= event_enable_trigger_func,
1573 	.reg			= event_enable_register_trigger,
1574 	.unreg			= event_enable_unregister_trigger,
1575 	.get_trigger_ops	= event_enable_get_trigger_ops,
1576 	.set_filter		= set_trigger_filter,
1577 };
1578 
1579 static struct event_command trigger_disable_cmd = {
1580 	.name			= DISABLE_EVENT_STR,
1581 	.trigger_type		= ETT_EVENT_ENABLE,
1582 	.func			= event_enable_trigger_func,
1583 	.reg			= event_enable_register_trigger,
1584 	.unreg			= event_enable_unregister_trigger,
1585 	.get_trigger_ops	= event_enable_get_trigger_ops,
1586 	.set_filter		= set_trigger_filter,
1587 };
1588 
1589 static __init void unregister_trigger_enable_disable_cmds(void)
1590 {
1591 	unregister_event_command(&trigger_enable_cmd);
1592 	unregister_event_command(&trigger_disable_cmd);
1593 }
1594 
1595 static __init int register_trigger_enable_disable_cmds(void)
1596 {
1597 	int ret;
1598 
1599 	ret = register_event_command(&trigger_enable_cmd);
1600 	if (WARN_ON(ret < 0))
1601 		return ret;
1602 	ret = register_event_command(&trigger_disable_cmd);
1603 	if (WARN_ON(ret < 0))
1604 		unregister_trigger_enable_disable_cmds();
1605 
1606 	return ret;
1607 }
1608 
1609 static __init int register_trigger_traceon_traceoff_cmds(void)
1610 {
1611 	int ret;
1612 
1613 	ret = register_event_command(&trigger_traceon_cmd);
1614 	if (WARN_ON(ret < 0))
1615 		return ret;
1616 	ret = register_event_command(&trigger_traceoff_cmd);
1617 	if (WARN_ON(ret < 0))
1618 		unregister_trigger_traceon_traceoff_cmds();
1619 
1620 	return ret;
1621 }
1622 
1623 __init int register_trigger_cmds(void)
1624 {
1625 	register_trigger_traceon_traceoff_cmds();
1626 	register_trigger_snapshot_cmd();
1627 	register_trigger_stacktrace_cmd();
1628 	register_trigger_enable_disable_cmds();
1629 	register_trigger_hist_enable_disable_cmds();
1630 	register_trigger_hist_cmd();
1631 
1632 	return 0;
1633 }
1634