1 /*
2  * trace_events_trigger - trace event triggers
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19  */
20 
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 
26 #include "trace.h"
27 
/* List of registered event_commands; guarded by trigger_cmd_mutex. */
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);
30 
/*
 * Free an event_trigger_data instance.  Any attached filter is torn
 * down first (via the command's set_filter(NULL, ...)) so no new
 * matches can occur, then we wait for in-flight trigger invocations
 * to finish before releasing the memory.
 */
static void
trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}
40 
41 /**
42  * event_triggers_call - Call triggers associated with a trace event
43  * @file: The ftrace_event_file associated with the event
44  * @rec: The trace entry for the event, NULL for unconditional invocation
45  *
46  * For each trigger associated with an event, invoke the trigger
47  * function registered with the associated trigger command.  If rec is
48  * non-NULL, it means that the trigger requires further processing and
49  * shouldn't be unconditionally invoked.  If rec is non-NULL and the
50  * trigger has a filter associated with it, rec will checked against
51  * the filter and if the record matches the trigger will be invoked.
52  * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53  * in any case until the current event is written, the trigger
54  * function isn't invoked but the bit associated with the deferred
55  * trigger is set in the return value.
56  *
57  * Returns an enum event_trigger_type value containing a set bit for
58  * any trigger that should be deferred, ETT_NONE if nothing to defer.
59  *
60  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61  *
62  * Return: an enum event_trigger_type value containing a set bit for
63  * any trigger that should be deferred, ETT_NONE if nothing to defer.
64  */
65 enum event_trigger_type
66 event_triggers_call(struct ftrace_event_file *file, void *rec)
67 {
68 	struct event_trigger_data *data;
69 	enum event_trigger_type tt = ETT_NONE;
70 	struct event_filter *filter;
71 
72 	if (list_empty(&file->triggers))
73 		return tt;
74 
75 	list_for_each_entry_rcu(data, &file->triggers, list) {
76 		if (!rec) {
77 			data->ops->func(data);
78 			continue;
79 		}
80 		filter = rcu_dereference_sched(data->filter);
81 		if (filter && !filter_match_preds(filter, rec))
82 			continue;
83 		if (data->cmd_ops->post_trigger) {
84 			tt |= data->cmd_ops->trigger_type;
85 			continue;
86 		}
87 		data->ops->func(data);
88 	}
89 	return tt;
90 }
91 EXPORT_SYMBOL_GPL(event_triggers_call);
92 
93 /**
94  * event_triggers_post_call - Call 'post_triggers' for a trace event
95  * @file: The ftrace_event_file associated with the event
96  * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
97  *
98  * For each trigger associated with an event, invoke the trigger
99  * function registered with the associated trigger command, if the
100  * corresponding bit is set in the tt enum passed into this function.
101  * See @event_triggers_call for details on how those bits are set.
102  *
103  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
104  */
105 void
106 event_triggers_post_call(struct ftrace_event_file *file,
107 			 enum event_trigger_type tt)
108 {
109 	struct event_trigger_data *data;
110 
111 	list_for_each_entry_rcu(data, &file->triggers, list) {
112 		if (data->cmd_ops->trigger_type & tt)
113 			data->ops->func(data);
114 	}
115 }
116 EXPORT_SYMBOL_GPL(event_triggers_post_call);
117 
118 #define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)
119 
120 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
121 {
122 	struct ftrace_event_file *event_file = event_file_data(m->private);
123 
124 	if (t == SHOW_AVAILABLE_TRIGGERS)
125 		return NULL;
126 
127 	return seq_list_next(t, &event_file->triggers, pos);
128 }
129 
/*
 * seq_file ->start: takes event_mutex, which stays held for the whole
 * traversal and is dropped in trigger_stop().  If the event has no
 * triggers, return the SHOW_AVAILABLE_TRIGGERS pseudo-entry on the
 * first iteration so trigger_show() prints the command list instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV); /* event was removed under us */

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}
145 
/* seq_file ->stop: drop event_mutex taken in trigger_start(). */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}
150 
151 static int trigger_show(struct seq_file *m, void *v)
152 {
153 	struct event_trigger_data *data;
154 	struct event_command *p;
155 
156 	if (v == SHOW_AVAILABLE_TRIGGERS) {
157 		seq_puts(m, "# Available triggers:\n");
158 		seq_putc(m, '#');
159 		mutex_lock(&trigger_cmd_mutex);
160 		list_for_each_entry_reverse(p, &trigger_commands, list)
161 			seq_printf(m, " %s", p->name);
162 		seq_putc(m, '\n');
163 		mutex_unlock(&trigger_cmd_mutex);
164 		return 0;
165 	}
166 
167 	data = list_entry(v, struct event_trigger_data, list);
168 	data->ops->print(m, data->ops, data);
169 
170 	return 0;
171 }
172 
/* seq_file operations backing the per-event 'trigger' file reads. */
static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};
179 
/*
 * Open the per-event 'trigger' file.  event_mutex is held while
 * checking that the event still exists and while setting up the
 * seq_file, so the event can't be removed underneath us.  The file
 * pointer itself is stashed in m->private; trigger_start/next
 * re-resolve it via event_file_data() on every traversal.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
203 
204 static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
205 {
206 	char *command, *next = buff;
207 	struct event_command *p;
208 	int ret = -EINVAL;
209 
210 	command = strsep(&next, ": \t");
211 	command = (command[0] != '!') ? command : command + 1;
212 
213 	mutex_lock(&trigger_cmd_mutex);
214 	list_for_each_entry(p, &trigger_commands, list) {
215 		if (strcmp(p->name, command) == 0) {
216 			ret = p->func(p, file, buff, command, next);
217 			goto out_unlock;
218 		}
219 	}
220  out_unlock:
221 	mutex_unlock(&trigger_cmd_mutex);
222 
223 	return ret;
224 }
225 
226 static ssize_t event_trigger_regex_write(struct file *file,
227 					 const char __user *ubuf,
228 					 size_t cnt, loff_t *ppos)
229 {
230 	struct ftrace_event_file *event_file;
231 	ssize_t ret;
232 	char *buf;
233 
234 	if (!cnt)
235 		return 0;
236 
237 	if (cnt >= PAGE_SIZE)
238 		return -EINVAL;
239 
240 	buf = (char *)__get_free_page(GFP_TEMPORARY);
241 	if (!buf)
242 		return -ENOMEM;
243 
244 	if (copy_from_user(buf, ubuf, cnt)) {
245 		free_page((unsigned long)buf);
246 		return -EFAULT;
247 	}
248 	buf[cnt] = '\0';
249 	strim(buf);
250 
251 	mutex_lock(&event_mutex);
252 	event_file = event_file_data(file);
253 	if (unlikely(!event_file)) {
254 		mutex_unlock(&event_mutex);
255 		free_page((unsigned long)buf);
256 		return -ENODEV;
257 	}
258 	ret = trigger_process_regex(event_file, buf);
259 	mutex_unlock(&event_mutex);
260 
261 	free_page((unsigned long)buf);
262 	if (ret < 0)
263 		goto out;
264 
265 	*ppos += cnt;
266 	ret = cnt;
267  out:
268 	return ret;
269 }
270 
/*
 * Release the 'trigger' file.  The seq_file is only created for
 * readable opens, so only release it in that case.
 */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}
282 
/* Thin wrapper: 'trigger' file ->write goes through the regex writer. */
static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}
289 
/* Thin wrapper: 'trigger' file ->open goes through the regex opener. */
static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}
295 
/* Thin wrapper: 'trigger' file ->release goes through the regex release. */
static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}
301 
/* File operations for each event's 'trigger' file. */
const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
309 
310 /*
311  * Currently we only register event commands from __init, so mark this
312  * __init too.
313  */
314 static __init int register_event_command(struct event_command *cmd)
315 {
316 	struct event_command *p;
317 	int ret = 0;
318 
319 	mutex_lock(&trigger_cmd_mutex);
320 	list_for_each_entry(p, &trigger_commands, list) {
321 		if (strcmp(cmd->name, p->name) == 0) {
322 			ret = -EBUSY;
323 			goto out_unlock;
324 		}
325 	}
326 	list_add(&cmd->list, &trigger_commands);
327  out_unlock:
328 	mutex_unlock(&trigger_cmd_mutex);
329 
330 	return ret;
331 }
332 
333 /*
334  * Currently we only unregister event commands from __init, so mark
335  * this __init too.
336  */
337 static __init int unregister_event_command(struct event_command *cmd)
338 {
339 	struct event_command *p, *n;
340 	int ret = -ENODEV;
341 
342 	mutex_lock(&trigger_cmd_mutex);
343 	list_for_each_entry_safe(p, n, &trigger_commands, list) {
344 		if (strcmp(cmd->name, p->name) == 0) {
345 			ret = 0;
346 			list_del_init(&p->list);
347 			goto out_unlock;
348 		}
349 	}
350  out_unlock:
351 	mutex_unlock(&trigger_cmd_mutex);
352 
353 	return ret;
354 }
355 
356 /**
357  * event_trigger_print - Generic event_trigger_ops @print implementation
358  * @name: The name of the event trigger
359  * @m: The seq_file being printed to
360  * @data: Trigger-specific data
361  * @filter_str: filter_str to print, if present
362  *
363  * Common implementation for event triggers to print themselves.
364  *
365  * Usually wrapped by a function that simply sets the @name of the
366  * trigger command and then invokes this.
367  *
368  * Return: 0 on success, errno otherwise
369  */
370 static int
371 event_trigger_print(const char *name, struct seq_file *m,
372 		    void *data, char *filter_str)
373 {
374 	long count = (long)data;
375 
376 	seq_printf(m, "%s", name);
377 
378 	if (count == -1)
379 		seq_puts(m, ":unlimited");
380 	else
381 		seq_printf(m, ":count=%ld", count);
382 
383 	if (filter_str)
384 		seq_printf(m, " if %s\n", filter_str);
385 	else
386 		seq_puts(m, "\n");
387 
388 	return 0;
389 }
390 
391 /**
392  * event_trigger_init - Generic event_trigger_ops @init implementation
393  * @ops: The trigger ops associated with the trigger
394  * @data: Trigger-specific data
395  *
396  * Common implementation of event trigger initialization.
397  *
398  * Usually used directly as the @init method in event trigger
399  * implementations.
400  *
401  * Return: 0 on success, errno otherwise
402  */
403 static int
404 event_trigger_init(struct event_trigger_ops *ops,
405 		   struct event_trigger_data *data)
406 {
407 	data->ref++;
408 	return 0;
409 }
410 
411 /**
412  * event_trigger_free - Generic event_trigger_ops @free implementation
413  * @ops: The trigger ops associated with the trigger
414  * @data: Trigger-specific data
415  *
416  * Common implementation of event trigger de-initialization.
417  *
418  * Usually used directly as the @free method in event trigger
419  * implementations.
420  */
421 static void
422 event_trigger_free(struct event_trigger_ops *ops,
423 		   struct event_trigger_data *data)
424 {
425 	if (WARN_ON_ONCE(data->ref <= 0))
426 		return;
427 
428 	data->ref--;
429 	if (!data->ref)
430 		trigger_data_free(data);
431 }
432 
/*
 * Enable or disable "trigger mode" on @file, refcounted via tm_ref so
 * only the first enable and the last disable actually change state.
 * Enabling sets TRIGGER_MODE and (soft-)enables the event; disabling
 * reverses both.
 *
 * Return: result of trace_event_enable_disable(), or 0 if only the
 * refcount changed.
 */
static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret; /* already in trigger mode */
		set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret; /* other triggers still active */
		clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
452 
453 /**
454  * clear_event_triggers - Clear all triggers associated with a trace array
455  * @tr: The trace array to clear
456  *
457  * For each trigger, the triggering event has its tm_ref decremented
458  * via trace_event_trigger_enable_disable(), and any associated event
459  * (in the case of enable/disable_event triggers) will have its sm_ref
460  * decremented via free()->trace_event_enable_disable().  That
461  * combination effectively reverses the soft-mode/trigger state added
462  * by trigger registration.
463  *
464  * Must be called with event_mutex held.
465  */
466 void
467 clear_event_triggers(struct trace_array *tr)
468 {
469 	struct ftrace_event_file *file;
470 
471 	list_for_each_entry(file, &tr->events, list) {
472 		struct event_trigger_data *data;
473 		list_for_each_entry_rcu(data, &file->triggers, list) {
474 			trace_event_trigger_enable_disable(file, 0);
475 			if (data->ops->free)
476 				data->ops->free(data->ops, data);
477 		}
478 	}
479 }
480 
481 /**
482  * update_cond_flag - Set or reset the TRIGGER_COND bit
483  * @file: The ftrace_event_file associated with the event
484  *
485  * If an event has triggers and any of those triggers has a filter or
486  * a post_trigger, trigger invocation needs to be deferred until after
487  * the current event has logged its data, and the event should have
488  * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
489  * cleared.
490  */
491 static void update_cond_flag(struct ftrace_event_file *file)
492 {
493 	struct event_trigger_data *data;
494 	bool set_cond = false;
495 
496 	list_for_each_entry_rcu(data, &file->triggers, list) {
497 		if (data->filter || data->cmd_ops->post_trigger) {
498 			set_cond = true;
499 			break;
500 		}
501 	}
502 
503 	if (set_cond)
504 		set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
505 	else
506 		clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags);
507 }
508 
509 /**
510  * register_trigger - Generic event_command @reg implementation
511  * @glob: The raw string used to register the trigger
512  * @ops: The trigger ops associated with the trigger
513  * @data: Trigger-specific data to associate with the trigger
514  * @file: The ftrace_event_file associated with the event
515  *
516  * Common implementation for event trigger registration.
517  *
518  * Usually used directly as the @reg method in event command
519  * implementations.
520  *
521  * Return: 0 on success, errno otherwise
522  */
523 static int register_trigger(char *glob, struct event_trigger_ops *ops,
524 			    struct event_trigger_data *data,
525 			    struct ftrace_event_file *file)
526 {
527 	struct event_trigger_data *test;
528 	int ret = 0;
529 
530 	list_for_each_entry_rcu(test, &file->triggers, list) {
531 		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
532 			ret = -EEXIST;
533 			goto out;
534 		}
535 	}
536 
537 	if (data->ops->init) {
538 		ret = data->ops->init(data->ops, data);
539 		if (ret < 0)
540 			goto out;
541 	}
542 
543 	list_add_rcu(&data->list, &file->triggers);
544 	ret++;
545 
546 	if (trace_event_trigger_enable_disable(file, 1) < 0) {
547 		list_del_rcu(&data->list);
548 		ret--;
549 	}
550 	update_cond_flag(file);
551 out:
552 	return ret;
553 }
554 
555 /**
556  * unregister_trigger - Generic event_command @unreg implementation
557  * @glob: The raw string used to register the trigger
558  * @ops: The trigger ops associated with the trigger
559  * @test: Trigger-specific data used to find the trigger to remove
560  * @file: The ftrace_event_file associated with the event
561  *
562  * Common implementation for event trigger unregistration.
563  *
564  * Usually used directly as the @unreg method in event command
565  * implementations.
566  */
567 static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
568 			       struct event_trigger_data *test,
569 			       struct ftrace_event_file *file)
570 {
571 	struct event_trigger_data *data;
572 	bool unregistered = false;
573 
574 	list_for_each_entry_rcu(data, &file->triggers, list) {
575 		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
576 			unregistered = true;
577 			list_del_rcu(&data->list);
578 			update_cond_flag(file);
579 			trace_event_trigger_enable_disable(file, 0);
580 			break;
581 		}
582 	}
583 
584 	if (unregistered && data->ops->free)
585 		data->ops->free(data->ops, data);
586 }
587 
588 /**
589  * event_trigger_callback - Generic event_command @func implementation
590  * @cmd_ops: The command ops, used for trigger registration
591  * @file: The ftrace_event_file associated with the event
592  * @glob: The raw string used to register the trigger
593  * @cmd: The cmd portion of the string used to register the trigger
594  * @param: The params portion of the string used to register the trigger
595  *
596  * Common implementation for event command parsing and trigger
597  * instantiation.
598  *
599  * Usually used directly as the @func method in event command
600  * implementations.
601  *
602  * Return: 0 on success, errno otherwise
603  */
604 static int
605 event_trigger_callback(struct event_command *cmd_ops,
606 		       struct ftrace_event_file *file,
607 		       char *glob, char *cmd, char *param)
608 {
609 	struct event_trigger_data *trigger_data;
610 	struct event_trigger_ops *trigger_ops;
611 	char *trigger = NULL;
612 	char *number;
613 	int ret;
614 
615 	/* separate the trigger from the filter (t:n [if filter]) */
616 	if (param && isdigit(param[0]))
617 		trigger = strsep(&param, " \t");
618 
619 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
620 
621 	ret = -ENOMEM;
622 	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
623 	if (!trigger_data)
624 		goto out;
625 
626 	trigger_data->count = -1;
627 	trigger_data->ops = trigger_ops;
628 	trigger_data->cmd_ops = cmd_ops;
629 	INIT_LIST_HEAD(&trigger_data->list);
630 
631 	if (glob[0] == '!') {
632 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
633 		kfree(trigger_data);
634 		ret = 0;
635 		goto out;
636 	}
637 
638 	if (trigger) {
639 		number = strsep(&trigger, ":");
640 
641 		ret = -EINVAL;
642 		if (!strlen(number))
643 			goto out_free;
644 
645 		/*
646 		 * We use the callback data field (which is a pointer)
647 		 * as our counter.
648 		 */
649 		ret = kstrtoul(number, 0, &trigger_data->count);
650 		if (ret)
651 			goto out_free;
652 	}
653 
654 	if (!param) /* if param is non-empty, it's supposed to be a filter */
655 		goto out_reg;
656 
657 	if (!cmd_ops->set_filter)
658 		goto out_reg;
659 
660 	ret = cmd_ops->set_filter(param, trigger_data, file);
661 	if (ret < 0)
662 		goto out_free;
663 
664  out_reg:
665 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
666 	/*
667 	 * The above returns on success the # of functions enabled,
668 	 * but if it didn't find any functions it returns zero.
669 	 * Consider no functions a failure too.
670 	 */
671 	if (!ret) {
672 		ret = -ENOENT;
673 		goto out_free;
674 	} else if (ret < 0)
675 		goto out_free;
676 	ret = 0;
677  out:
678 	return ret;
679 
680  out_free:
681 	if (cmd_ops->set_filter)
682 		cmd_ops->set_filter(NULL, trigger_data, NULL);
683 	kfree(trigger_data);
684 	goto out;
685 }
686 
687 /**
688  * set_trigger_filter - Generic event_command @set_filter implementation
689  * @filter_str: The filter string for the trigger, NULL to remove filter
690  * @trigger_data: Trigger-specific data
691  * @file: The ftrace_event_file associated with the event
692  *
693  * Common implementation for event command filter parsing and filter
694  * instantiation.
695  *
696  * Usually used directly as the @set_filter method in event command
697  * implementations.
698  *
699  * Also used to remove a filter (if filter_str = NULL).
700  *
701  * Return: 0 on success, errno otherwise
702  */
703 static int set_trigger_filter(char *filter_str,
704 			      struct event_trigger_data *trigger_data,
705 			      struct ftrace_event_file *file)
706 {
707 	struct event_trigger_data *data = trigger_data;
708 	struct event_filter *filter = NULL, *tmp;
709 	int ret = -EINVAL;
710 	char *s;
711 
712 	if (!filter_str) /* clear the current filter */
713 		goto assign;
714 
715 	s = strsep(&filter_str, " \t");
716 
717 	if (!strlen(s) || strcmp(s, "if") != 0)
718 		goto out;
719 
720 	if (!filter_str)
721 		goto out;
722 
723 	/* The filter is for the 'trigger' event, not the triggered event */
724 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
725 	if (ret)
726 		goto out;
727  assign:
728 	tmp = rcu_access_pointer(data->filter);
729 
730 	rcu_assign_pointer(data->filter, filter);
731 
732 	if (tmp) {
733 		/* Make sure the call is done with the filter */
734 		synchronize_sched();
735 		free_event_filter(tmp);
736 	}
737 
738 	kfree(data->filter_str);
739 	data->filter_str = NULL;
740 
741 	if (filter_str) {
742 		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
743 		if (!data->filter_str) {
744 			free_event_filter(rcu_access_pointer(data->filter));
745 			data->filter = NULL;
746 			ret = -ENOMEM;
747 		}
748 	}
749  out:
750 	return ret;
751 }
752 
/* 'traceon' trigger: turn the ring buffer on (no-op if already on). */
static void
traceon_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		tracing_on();
}
761 
762 static void
763 traceon_count_trigger(struct event_trigger_data *data)
764 {
765 	if (tracing_is_on())
766 		return;
767 
768 	if (!data->count)
769 		return;
770 
771 	if (data->count != -1)
772 		(data->count)--;
773 
774 	tracing_on();
775 }
776 
/* 'traceoff' trigger: turn the ring buffer off (no-op if already off). */
static void
traceoff_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		tracing_off();
}
785 
786 static void
787 traceoff_count_trigger(struct event_trigger_data *data)
788 {
789 	if (!tracing_is_on())
790 		return;
791 
792 	if (!data->count)
793 		return;
794 
795 	if (data->count != -1)
796 		(data->count)--;
797 
798 	tracing_off();
799 }
800 
/* Print a 'traceon' trigger via the generic printer. */
static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}
808 
/* Print a 'traceoff' trigger via the generic printer. */
static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}
816 
/* traceon, unlimited variant (no :count given) */
static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* traceon, counted variant (traceon:N) */
static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* traceoff, unlimited variant */
static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* traceoff, counted variant (traceoff:N) */
static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
844 
845 static struct event_trigger_ops *
846 onoff_get_trigger_ops(char *cmd, char *param)
847 {
848 	struct event_trigger_ops *ops;
849 
850 	/* we register both traceon and traceoff to this callback */
851 	if (strcmp(cmd, "traceon") == 0)
852 		ops = param ? &traceon_count_trigger_ops :
853 			&traceon_trigger_ops;
854 	else
855 		ops = param ? &traceoff_count_trigger_ops :
856 			&traceoff_trigger_ops;
857 
858 	return ops;
859 }
860 
/* The 'traceon' trigger command, built on the generic implementations. */
static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* The 'traceoff' trigger command; shares ETT_TRACE_ONOFF with traceon. */
static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
880 
881 #ifdef CONFIG_TRACER_SNAPSHOT
/* 'snapshot' trigger: take a snapshot of the current ring buffer. */
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}
887 
888 static void
889 snapshot_count_trigger(struct event_trigger_data *data)
890 {
891 	if (!data->count)
892 		return;
893 
894 	if (data->count != -1)
895 		(data->count)--;
896 
897 	snapshot_trigger(data);
898 }
899 
/*
 * @reg for the snapshot command: register normally, then make sure a
 * snapshot buffer is allocated.  If allocation fails, back out the
 * registration and return 0 (treated as failure by the caller).
 */
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct ftrace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}
914 
/* Print a 'snapshot' trigger via the generic printer. */
static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}
922 
/* snapshot, unlimited variant */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* snapshot, counted variant (snapshot:N) */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
936 
937 static struct event_trigger_ops *
938 snapshot_get_trigger_ops(char *cmd, char *param)
939 {
940 	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
941 }
942 
/* The 'snapshot' trigger command (CONFIG_TRACER_SNAPSHOT only). */
static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
952 
953 static __init int register_trigger_snapshot_cmd(void)
954 {
955 	int ret;
956 
957 	ret = register_event_command(&trigger_snapshot_cmd);
958 	WARN_ON(ret < 0);
959 
960 	return ret;
961 }
962 #else
/* !CONFIG_TRACER_SNAPSHOT stub: nothing to register. */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
964 #endif /* CONFIG_TRACER_SNAPSHOT */
965 
966 #ifdef CONFIG_STACKTRACE
967 /*
968  * Skip 3:
969  *   stacktrace_trigger()
970  *   event_triggers_post_call()
971  *   ftrace_raw_event_xxx()
972  */
973 #define STACK_SKIP 3
974 
/* 'stacktrace' trigger: dump the stack, skipping our own frames. */
static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}
980 
981 static void
982 stacktrace_count_trigger(struct event_trigger_data *data)
983 {
984 	if (!data->count)
985 		return;
986 
987 	if (data->count != -1)
988 		(data->count)--;
989 
990 	stacktrace_trigger(data);
991 }
992 
/* Print a 'stacktrace' trigger via the generic printer. */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}
1000 
/* stacktrace, unlimited variant */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* stacktrace, counted variant (stacktrace:N) */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};
1014 
1015 static struct event_trigger_ops *
1016 stacktrace_get_trigger_ops(char *cmd, char *param)
1017 {
1018 	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1019 }
1020 
/*
 * The 'stacktrace' trigger command.  post_trigger is set so the dump
 * happens after the current event is written (see STACK_SKIP above).
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.post_trigger		= true,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1031 
1032 static __init int register_trigger_stacktrace_cmd(void)
1033 {
1034 	int ret;
1035 
1036 	ret = register_event_command(&trigger_stacktrace_cmd);
1037 	WARN_ON(ret < 0);
1038 
1039 	return ret;
1040 }
1041 #else
/* !CONFIG_STACKTRACE stub: nothing to register. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1043 #endif /* CONFIG_STACKTRACE */
1044 
/* Back out traceon/traceoff registration (error path in init code). */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}
1050 
1051 /* Avoid typos */
1052 #define ENABLE_EVENT_STR	"enable_event"
1053 #define DISABLE_EVENT_STR	"disable_event"
1054 
/*
 * Private data for enable_event/disable_event triggers:
 * @file is the *target* event to flip, @enable distinguishes the
 * enable_event (true) from the disable_event (false) command.
 */
struct enable_trigger_data {
	struct ftrace_event_file	*file;
	bool				enable;
};
1059 
1060 static void
1061 event_enable_trigger(struct event_trigger_data *data)
1062 {
1063 	struct enable_trigger_data *enable_data = data->private_data;
1064 
1065 	if (enable_data->enable)
1066 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1067 	else
1068 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1069 }
1070 
/*
 * Counted enable_event/disable_event trigger: fires at most
 * data->count times (-1 == unlimited), and only counts invocations
 * that actually change the target event's state.
 */
static void
event_enable_count_trigger(struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data);
}
1088 
/*
 * Print an enable_event/disable_event trigger as
 * "enable_event:<system>:<event>[:count=N][ if <filter>]".
 * Open-coded (instead of event_trigger_print()) because the name
 * embeds the target event's system:name.
 */
static int
event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   enable_data->file->event_call->class->system,
		   ftrace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_puts(m, "\n");

	return 0;
}
1112 
1113 static void
1114 event_enable_trigger_free(struct event_trigger_ops *ops,
1115 			  struct event_trigger_data *data)
1116 {
1117 	struct enable_trigger_data *enable_data = data->private_data;
1118 
1119 	if (WARN_ON_ONCE(data->ref <= 0))
1120 		return;
1121 
1122 	data->ref--;
1123 	if (!data->ref) {
1124 		/* Remove the SOFT_MODE flag */
1125 		trace_event_enable_disable(enable_data->file, 0, 1);
1126 		module_put(enable_data->file->event_call->mod);
1127 		trigger_data_free(data);
1128 		kfree(enable_data);
1129 	}
1130 }
1131 
/* ops for "enable_event" with no :count (fires every time) */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1138 
/* ops for "enable_event" with a :count limit */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1145 
/*
 * ops for "disable_event" with no :count.  Shares .func with the enable
 * table; the direction comes from enable_trigger_data->enable.
 */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1152 
/* ops for "disable_event" with a :count limit */
static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
1159 
/*
 * event_enable_trigger_func - parse and attach/detach an enable_event or
 * disable_event trigger on @file.
 *
 * @cmd_ops: the event_command that matched (trigger_enable_cmd or
 *           trigger_disable_cmd)
 * @file:    the event file the trigger command was written to
 * @glob:    the full command token; a leading '!' requests removal
 * @cmd:     the command name (ENABLE_EVENT_STR or DISABLE_EVENT_STR)
 * @param:   the rest of the line: "system:event[:count] [if filter]"
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int
event_enable_trigger_func(struct event_command *cmd_ops,
			  struct ftrace_event_file *file,
			  char *glob, char *cmd, char *param)
{
	struct ftrace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	/* carve system and event names out of the trigger spec */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	/* the event to be enabled/disabled need not live in this file */
	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

	/* command name decides the direction of the toggle */
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	/* -1 == unlimited firings; may be overridden by :count below */
	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/*
	 * Removal: the freshly built trigger_data only serves as a
	 * template for unreg() to match against, so free it afterwards.
	 */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* optional :count portion of the trigger spec */
	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* put the target event into SOFT_MODE so the trigger can flip it */
	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

	/* error unwind: undo in the reverse order of the setup above */
 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
1293 
/*
 * event_enable_register_trigger - add an enable/disable_event trigger to
 * @file's trigger list.
 *
 * Rejects a duplicate (-EEXIST) if a trigger targeting the same event
 * file is already registered.  On success returns the number of triggers
 * added (1); returns 0 if enabling triggers on @file failed and the
 * trigger had to be backed out again.
 *
 * NOTE(review): the list walk uses list_for_each_entry_rcu() but mutates
 * the list below — presumably the caller serializes via the trigger
 * mutex; confirm against the callers.
 */
static int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct ftrace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		/* private_data may be NULL for non-enable triggers */
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	/* back the trigger out again if @file can't have triggers enabled */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
	update_cond_flag(file);
out:
	return ret;
}
1330 
/*
 * event_enable_unregister_trigger - remove the enable/disable_event
 * trigger on @file that targets the same event file as @test.
 *
 * @test is only a template used for matching; the matching trigger found
 * on the list is unlinked and then freed via its ops->free() callback.
 * If no match is found this is a no-op.
 */
static void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct ftrace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	/*
	 * data is only valid here when the loop broke out early, which
	 * is exactly what the unregistered flag records.
	 */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
1356 
1357 static struct event_trigger_ops *
1358 event_enable_get_trigger_ops(char *cmd, char *param)
1359 {
1360 	struct event_trigger_ops *ops;
1361 	bool enable;
1362 
1363 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1364 
1365 	if (enable)
1366 		ops = param ? &event_enable_count_trigger_ops :
1367 			&event_enable_trigger_ops;
1368 	else
1369 		ops = param ? &event_disable_count_trigger_ops :
1370 			&event_disable_trigger_ops;
1371 
1372 	return ops;
1373 }
1374 
/* "enable_event" trigger command: soft-enables another trace event */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1384 
/*
 * "disable_event" trigger command: soft-disables another trace event.
 * Shares all callbacks with trigger_enable_cmd; the command name alone
 * determines the direction (see event_enable_trigger_func()).
 */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
1394 
/*
 * Remove the enable/disable_event trigger commands; used as the unwind
 * path when registering the second of the pair fails.
 */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}
1400 
1401 static __init int register_trigger_enable_disable_cmds(void)
1402 {
1403 	int ret;
1404 
1405 	ret = register_event_command(&trigger_enable_cmd);
1406 	if (WARN_ON(ret < 0))
1407 		return ret;
1408 	ret = register_event_command(&trigger_disable_cmd);
1409 	if (WARN_ON(ret < 0))
1410 		unregister_trigger_enable_disable_cmds();
1411 
1412 	return ret;
1413 }
1414 
1415 static __init int register_trigger_traceon_traceoff_cmds(void)
1416 {
1417 	int ret;
1418 
1419 	ret = register_event_command(&trigger_traceon_cmd);
1420 	if (WARN_ON(ret < 0))
1421 		return ret;
1422 	ret = register_event_command(&trigger_traceoff_cmd);
1423 	if (WARN_ON(ret < 0))
1424 		unregister_trigger_traceon_traceoff_cmds();
1425 
1426 	return ret;
1427 }
1428 
/*
 * register_trigger_cmds - register all trace event trigger commands
 *
 * Called once at init.  Registration failures are reported (via WARN_ON
 * in the helpers visible here — presumably the snapshot/stacktrace
 * helpers do likewise; confirm) but not propagated: always returns 0.
 */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();

	return 0;
}
1438