xref: /openbmc/linux/kernel/trace/trace_events.c (revision 8a10bc9d)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of fields of the trace point.
7  *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 static LIST_HEAD(ftrace_common_fields);
38 
39 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
40 
41 static struct kmem_cache *field_cachep;
42 static struct kmem_cache *file_cachep;
43 
44 #define SYSTEM_FL_FREE_NAME		(1 << 31)
45 
46 static inline int system_refcount(struct event_subsystem *system)
47 {
48 	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
49 }
50 
51 static int system_refcount_inc(struct event_subsystem *system)
52 {
53 	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
54 }
55 
56 static int system_refcount_dec(struct event_subsystem *system)
57 {
58 	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
59 }
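
/*
 * Note: bit 31 of ref_count doubles as SYSTEM_FL_FREE_NAME, marking
 * subsystems whose name was kstrdup()'d and must be kfree()'d on
 * release. The helpers above mask that bit out so callers only ever
 * see the plain reference count.
 */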
60 
61 /* Double loops, do not use break, only gotos work */
62 #define do_for_each_event_file(tr, file)			\
63 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
64 		list_for_each_entry(file, &tr->events, list)
65 
66 #define do_for_each_event_file_safe(tr, file)			\
67 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
68 		struct ftrace_event_file *___n;				\
69 		list_for_each_entry_safe(file, ___n, &tr->events, list)
70 
71 #define while_for_each_event_file()		\
72 	}
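
/*
 * A typical pairing looks like this (illustrative; "found" and the
 * "out" label are placeholders). Note that break would only exit the
 * inner loop, hence the goto:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (found)
 *			goto out;
 *	} while_for_each_event_file();
 * out:
 */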
73 
74 static struct list_head *
75 trace_get_fields(struct ftrace_event_call *event_call)
76 {
77 	if (!event_call->class->get_fields)
78 		return &event_call->class->fields;
79 	return event_call->class->get_fields(event_call);
80 }
81 
82 static struct ftrace_event_field *
83 __find_event_field(struct list_head *head, char *name)
84 {
85 	struct ftrace_event_field *field;
86 
87 	list_for_each_entry(field, head, link) {
88 		if (!strcmp(field->name, name))
89 			return field;
90 	}
91 
92 	return NULL;
93 }
94 
95 struct ftrace_event_field *
96 trace_find_event_field(struct ftrace_event_call *call, char *name)
97 {
98 	struct ftrace_event_field *field;
99 	struct list_head *head;
100 
101 	field = __find_event_field(&ftrace_common_fields, name);
102 	if (field)
103 		return field;
104 
105 	head = trace_get_fields(call);
106 	return __find_event_field(head, name);
107 }
108 
109 static int __trace_define_field(struct list_head *head, const char *type,
110 				const char *name, int offset, int size,
111 				int is_signed, int filter_type)
112 {
113 	struct ftrace_event_field *field;
114 
115 	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
116 	if (!field)
117 		return -ENOMEM;
118 
119 	field->name = name;
120 	field->type = type;
121 
122 	if (filter_type == FILTER_OTHER)
123 		field->filter_type = filter_assign_type(type);
124 	else
125 		field->filter_type = filter_type;
126 
127 	field->offset = offset;
128 	field->size = size;
129 	field->is_signed = is_signed;
130 
131 	list_add(&field->link, head);
132 
133 	return 0;
134 }
135 
136 int trace_define_field(struct ftrace_event_call *call, const char *type,
137 		       const char *name, int offset, int size, int is_signed,
138 		       int filter_type)
139 {
140 	struct list_head *head;
141 
142 	if (WARN_ON(!call->class))
143 		return 0;
144 
145 	head = trace_get_fields(call);
146 	return __trace_define_field(head, type, name, offset, size,
147 				    is_signed, filter_type);
148 }
149 EXPORT_SYMBOL_GPL(trace_define_field);
150 
151 #define __common_field(type, item)					\
152 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
153 				   "common_" #item,			\
154 				   offsetof(typeof(ent), item),		\
155 				   sizeof(ent.item),			\
156 				   is_signed_type(type), FILTER_OTHER);	\
157 	if (ret)							\
158 		return ret;
159 
160 static int trace_define_common_fields(void)
161 {
162 	int ret;
163 	struct trace_entry ent;
164 
165 	__common_field(unsigned short, type);
166 	__common_field(unsigned char, flags);
167 	__common_field(unsigned char, preempt_count);
168 	__common_field(int, pid);
169 
170 	return ret;
171 }
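
/*
 * The four fields above mirror struct trace_entry, the header that
 * precedes every event record in the ring buffer; they show up as
 * the "common_*" fields in each event's format file.
 */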
172 
173 static void trace_destroy_fields(struct ftrace_event_call *call)
174 {
175 	struct ftrace_event_field *field, *next;
176 	struct list_head *head;
177 
178 	head = trace_get_fields(call);
179 	list_for_each_entry_safe(field, next, head, link) {
180 		list_del(&field->link);
181 		kmem_cache_free(field_cachep, field);
182 	}
183 }
184 
185 int trace_event_raw_init(struct ftrace_event_call *call)
186 {
187 	int id;
188 
189 	id = register_ftrace_event(&call->event);
190 	if (!id)
191 		return -ENODEV;
192 
193 	return 0;
194 }
195 EXPORT_SYMBOL_GPL(trace_event_raw_init);
196 
197 int ftrace_event_reg(struct ftrace_event_call *call,
198 		     enum trace_reg type, void *data)
199 {
200 	struct ftrace_event_file *file = data;
201 
202 	switch (type) {
203 	case TRACE_REG_REGISTER:
204 		return tracepoint_probe_register(call->name,
205 						 call->class->probe,
206 						 file);
207 	case TRACE_REG_UNREGISTER:
208 		tracepoint_probe_unregister(call->name,
209 					    call->class->probe,
210 					    file);
211 		return 0;
212 
213 #ifdef CONFIG_PERF_EVENTS
214 	case TRACE_REG_PERF_REGISTER:
215 		return tracepoint_probe_register(call->name,
216 						 call->class->perf_probe,
217 						 call);
218 	case TRACE_REG_PERF_UNREGISTER:
219 		tracepoint_probe_unregister(call->name,
220 					    call->class->perf_probe,
221 					    call);
222 		return 0;
223 	case TRACE_REG_PERF_OPEN:
224 	case TRACE_REG_PERF_CLOSE:
225 	case TRACE_REG_PERF_ADD:
226 	case TRACE_REG_PERF_DEL:
227 		return 0;
228 #endif
229 	}
230 	return 0;
231 }
232 EXPORT_SYMBOL_GPL(ftrace_event_reg);
233 
234 void trace_event_enable_cmd_record(bool enable)
235 {
236 	struct ftrace_event_file *file;
237 	struct trace_array *tr;
238 
239 	mutex_lock(&event_mutex);
240 	do_for_each_event_file(tr, file) {
241 
242 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
243 			continue;
244 
245 		if (enable) {
246 			tracing_start_cmdline_record();
247 			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
248 		} else {
249 			tracing_stop_cmdline_record();
250 			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
251 		}
252 	} while_for_each_event_file();
253 	mutex_unlock(&event_mutex);
254 }
255 
256 static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
257 					 int enable, int soft_disable)
258 {
259 	struct ftrace_event_call *call = file->event_call;
260 	int ret = 0;
261 	int disable;
262 
263 	switch (enable) {
264 	case 0:
265 		/*
266 		 * When soft_disable is set and enable is cleared, the sm_ref
267 		 * reference counter is decremented. If it reaches 0, we want
268 		 * to clear the SOFT_DISABLED flag but leave the event in the
269 		 * state that it was. That is, if the event was enabled and
270 		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
271 		 * is set we do not want the event to be enabled before we
272 		 * clear the bit.
273 		 *
274 		 * When soft_disable is not set but the SOFT_MODE flag is,
275 		 * we do nothing. Do not disable the tracepoint, otherwise
276 		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
277 		 */
278 		if (soft_disable) {
279 			if (atomic_dec_return(&file->sm_ref) > 0)
280 				break;
281 			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
282 			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
283 		} else
284 			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
285 
286 		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
287 			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
288 			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
289 				tracing_stop_cmdline_record();
290 				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
291 			}
292 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
293 		}
294 		/* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
295 		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
296 			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
297 		else
298 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
299 		break;
300 	case 1:
301 		/*
302 		 * When soft_disable is set and enable is set, we want to
303 		 * register the tracepoint for the event, but leave the event
304 		 * as is. That means, if the event was already enabled, we do
305 		 * nothing (but set SOFT_MODE). If the event is disabled, we
306 		 * set SOFT_DISABLED before enabling the event tracepoint, so
307 		 * it still seems to be disabled.
308 		 */
309 		if (!soft_disable)
310 			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
311 		else {
312 			if (atomic_inc_return(&file->sm_ref) > 1)
313 				break;
314 			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
315 		}
316 
317 		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
318 
319 			/* Keep the event disabled, when going to SOFT_MODE. */
320 			if (soft_disable)
321 				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
322 
323 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
324 				tracing_start_cmdline_record();
325 				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
326 			}
327 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
328 			if (ret) {
329 				tracing_stop_cmdline_record();
330 				pr_info("event trace: Could not enable event "
331 					"%s\n", call->name);
332 				break;
333 			}
334 			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
335 
336 			/* WAS_ENABLED gets set but never cleared. */
337 			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
338 		}
339 		break;
340 	}
341 
342 	return ret;
343 }
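
/*
 * Flag summary for the state machine above:
 *   ENABLED       - the tracepoint probe is registered.
 *   SOFT_MODE     - something (e.g. an enable_event probe) wants the
 *                   tracepoint registered even while the event looks
 *                   disabled.
 *   SOFT_DISABLED - the probe fires but the event is not recorded.
 * sm_ref counts SOFT_MODE users so nested soft enables/disables pair up.
 */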
344 
345 int trace_event_enable_disable(struct ftrace_event_file *file,
346 			       int enable, int soft_disable)
347 {
348 	return __ftrace_event_enable_disable(file, enable, soft_disable);
349 }
350 
351 static int ftrace_event_enable_disable(struct ftrace_event_file *file,
352 				       int enable)
353 {
354 	return __ftrace_event_enable_disable(file, enable, 0);
355 }
356 
357 static void ftrace_clear_events(struct trace_array *tr)
358 {
359 	struct ftrace_event_file *file;
360 
361 	mutex_lock(&event_mutex);
362 	list_for_each_entry(file, &tr->events, list) {
363 		ftrace_event_enable_disable(file, 0);
364 	}
365 	mutex_unlock(&event_mutex);
366 }
367 
368 static void __put_system(struct event_subsystem *system)
369 {
370 	struct event_filter *filter = system->filter;
371 
372 	WARN_ON_ONCE(system_refcount(system) == 0);
373 	if (system_refcount_dec(system))
374 		return;
375 
376 	list_del(&system->list);
377 
378 	if (filter) {
379 		kfree(filter->filter_string);
380 		kfree(filter);
381 	}
382 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
383 		kfree(system->name);
384 	kfree(system);
385 }
386 
387 static void __get_system(struct event_subsystem *system)
388 {
389 	WARN_ON_ONCE(system_refcount(system) == 0);
390 	system_refcount_inc(system);
391 }
392 
393 static void __get_system_dir(struct ftrace_subsystem_dir *dir)
394 {
395 	WARN_ON_ONCE(dir->ref_count == 0);
396 	dir->ref_count++;
397 	__get_system(dir->subsystem);
398 }
399 
400 static void __put_system_dir(struct ftrace_subsystem_dir *dir)
401 {
402 	WARN_ON_ONCE(dir->ref_count == 0);
403 	/* If the subsystem is about to be freed, the dir must be too */
404 	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
405 
406 	__put_system(dir->subsystem);
407 	if (!--dir->ref_count)
408 		kfree(dir);
409 }
410 
411 static void put_system(struct ftrace_subsystem_dir *dir)
412 {
413 	mutex_lock(&event_mutex);
414 	__put_system_dir(dir);
415 	mutex_unlock(&event_mutex);
416 }
417 
418 static void remove_subsystem(struct ftrace_subsystem_dir *dir)
419 {
420 	if (!dir)
421 		return;
422 
423 	if (!--dir->nr_events) {
424 		debugfs_remove_recursive(dir->entry);
425 		list_del(&dir->list);
426 		__put_system_dir(dir);
427 	}
428 }
429 
430 static void remove_event_file_dir(struct ftrace_event_file *file)
431 {
432 	struct dentry *dir = file->dir;
433 	struct dentry *child;
434 
435 	if (dir) {
436 		spin_lock(&dir->d_lock);	/* probably unneeded */
437 		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
438 			if (child->d_inode)	/* probably unneeded */
439 				child->d_inode->i_private = NULL;
440 		}
441 		spin_unlock(&dir->d_lock);
442 
443 		debugfs_remove_recursive(dir);
444 	}
445 
446 	list_del(&file->list);
447 	remove_subsystem(file->system);
448 	kmem_cache_free(file_cachep, file);
449 }
450 
451 /*
452  * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
453  */
454 static int
455 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
456 			      const char *sub, const char *event, int set)
457 {
458 	struct ftrace_event_file *file;
459 	struct ftrace_event_call *call;
460 	int ret = -EINVAL;
461 
462 	list_for_each_entry(file, &tr->events, list) {
463 
464 		call = file->event_call;
465 
466 		if (!call->name || !call->class || !call->class->reg)
467 			continue;
468 
469 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
470 			continue;
471 
472 		if (match &&
473 		    strcmp(match, call->name) != 0 &&
474 		    strcmp(match, call->class->system) != 0)
475 			continue;
476 
477 		if (sub && strcmp(sub, call->class->system) != 0)
478 			continue;
479 
480 		if (event && strcmp(event, call->name) != 0)
481 			continue;
482 
483 		ftrace_event_enable_disable(file, set);
484 
485 		ret = 0;
486 	}
487 
488 	return ret;
489 }
490 
491 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
492 				  const char *sub, const char *event, int set)
493 {
494 	int ret;
495 
496 	mutex_lock(&event_mutex);
497 	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
498 	mutex_unlock(&event_mutex);
499 
500 	return ret;
501 }
502 
503 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
504 {
505 	char *event = NULL, *sub = NULL, *match;
506 
507 	/*
508 	 * The buf format can be <subsystem>:<event-name>
509 	 *  *:<event-name> means any event by that name.
510 	 *  :<event-name> is the same.
511 	 *
512 	 *  <subsystem>:* means all events in that subsystem
513 	 *  <subsystem>: means the same.
514 	 *
515 	 *  <name> (no ':') means all events in a subsystem with
516 	 *  the name <name> or any event that matches <name>
517 	 */
518 
519 	match = strsep(&buf, ":");
520 	if (buf) {
521 		sub = match;
522 		event = buf;
523 		match = NULL;
524 
525 		if (!strlen(sub) || strcmp(sub, "*") == 0)
526 			sub = NULL;
527 		if (!strlen(event) || strcmp(event, "*") == 0)
528 			event = NULL;
529 	}
530 
531 	return __ftrace_set_clr_event(tr, match, sub, event, set);
532 }
533 
534 /**
535  * trace_set_clr_event - enable or disable an event
536  * @system: system name to match (NULL for any system)
537  * @event: event name to match (NULL for all events, within system)
538  * @set: 1 to enable, 0 to disable
539  *
540  * This is a way for other parts of the kernel to enable or disable
541  * event recording.
542  *
543  * Returns 0 on success, -EINVAL if the parameters do not match any
544  * registered events.
545  */
546 int trace_set_clr_event(const char *system, const char *event, int set)
547 {
548 	struct trace_array *tr = top_trace_array();
549 
550 	return __ftrace_set_clr_event(tr, NULL, system, event, set);
551 }
552 EXPORT_SYMBOL_GPL(trace_set_clr_event);
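
/*
 * For example (illustrative), another part of the kernel could enable
 * recording of sched_switch events with:
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */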
553 
554 /* 128 should be much more than enough */
555 #define EVENT_BUF_SIZE		127
556 
557 static ssize_t
558 ftrace_event_write(struct file *file, const char __user *ubuf,
559 		   size_t cnt, loff_t *ppos)
560 {
561 	struct trace_parser parser;
562 	struct seq_file *m = file->private_data;
563 	struct trace_array *tr = m->private;
564 	ssize_t read, ret;
565 
566 	if (!cnt)
567 		return 0;
568 
569 	ret = tracing_update_buffers();
570 	if (ret < 0)
571 		return ret;
572 
573 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
574 		return -ENOMEM;
575 
576 	read = trace_get_user(&parser, ubuf, cnt, ppos);
577 
578 	if (read >= 0 && trace_parser_loaded((&parser))) {
579 		int set = 1;
580 
581 		if (*parser.buffer == '!')
582 			set = 0;
583 
584 		parser.buffer[parser.idx] = 0;
585 
586 		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
587 		if (ret)
588 			goto out_put;
589 	}
590 
591 	ret = read;
592 
593  out_put:
594 	trace_parser_put(&parser);
595 
596 	return ret;
597 }
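
/*
 * This implements writes to the set_event file, e.g.:
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' clears the named event(s) instead of setting them.
 */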
598 
599 static void *
600 t_next(struct seq_file *m, void *v, loff_t *pos)
601 {
602 	struct ftrace_event_file *file = v;
603 	struct ftrace_event_call *call;
604 	struct trace_array *tr = m->private;
605 
606 	(*pos)++;
607 
608 	list_for_each_entry_continue(file, &tr->events, list) {
609 		call = file->event_call;
610 		/*
611 		 * The ftrace subsystem is for showing formats only.
612 		 * Its events cannot be enabled or disabled via the event files.
613 		 */
614 		if (call->class && call->class->reg)
615 			return file;
616 	}
617 
618 	return NULL;
619 }
620 
621 static void *t_start(struct seq_file *m, loff_t *pos)
622 {
623 	struct ftrace_event_file *file;
624 	struct trace_array *tr = m->private;
625 	loff_t l;
626 
627 	mutex_lock(&event_mutex);
628 
629 	file = list_entry(&tr->events, struct ftrace_event_file, list);
630 	for (l = 0; l <= *pos; ) {
631 		file = t_next(m, file, &l);
632 		if (!file)
633 			break;
634 	}
635 	return file;
636 }
637 
638 static void *
639 s_next(struct seq_file *m, void *v, loff_t *pos)
640 {
641 	struct ftrace_event_file *file = v;
642 	struct trace_array *tr = m->private;
643 
644 	(*pos)++;
645 
646 	list_for_each_entry_continue(file, &tr->events, list) {
647 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
648 			return file;
649 	}
650 
651 	return NULL;
652 }
653 
654 static void *s_start(struct seq_file *m, loff_t *pos)
655 {
656 	struct ftrace_event_file *file;
657 	struct trace_array *tr = m->private;
658 	loff_t l;
659 
660 	mutex_lock(&event_mutex);
661 
662 	file = list_entry(&tr->events, struct ftrace_event_file, list);
663 	for (l = 0; l <= *pos; ) {
664 		file = s_next(m, file, &l);
665 		if (!file)
666 			break;
667 	}
668 	return file;
669 }
670 
671 static int t_show(struct seq_file *m, void *v)
672 {
673 	struct ftrace_event_file *file = v;
674 	struct ftrace_event_call *call = file->event_call;
675 
676 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
677 		seq_printf(m, "%s:", call->class->system);
678 	seq_printf(m, "%s\n", call->name);
679 
680 	return 0;
681 }
682 
683 static void t_stop(struct seq_file *m, void *p)
684 {
685 	mutex_unlock(&event_mutex);
686 }
687 
688 static ssize_t
689 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
690 		  loff_t *ppos)
691 {
692 	struct ftrace_event_file *file;
693 	unsigned long flags;
694 	char buf[4] = "0";
695 
696 	mutex_lock(&event_mutex);
697 	file = event_file_data(filp);
698 	if (likely(file))
699 		flags = file->flags;
700 	mutex_unlock(&event_mutex);
701 
702 	if (!file)
703 		return -ENODEV;
704 
705 	if (flags & FTRACE_EVENT_FL_ENABLED &&
706 	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
707 		strcpy(buf, "1");
708 
709 	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
710 	    flags & FTRACE_EVENT_FL_SOFT_MODE)
711 		strcat(buf, "*");
712 
713 	strcat(buf, "\n");
714 
715 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
716 }
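
/*
 * Reads of an event's "enable" file thus yield "0" or "1", with a
 * trailing '*' (e.g. "0*") when the event is in soft mode or soft
 * disabled.
 */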
717 
718 static ssize_t
719 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
720 		   loff_t *ppos)
721 {
722 	struct ftrace_event_file *file;
723 	unsigned long val;
724 	int ret;
725 
726 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
727 	if (ret)
728 		return ret;
729 
730 	ret = tracing_update_buffers();
731 	if (ret < 0)
732 		return ret;
733 
734 	switch (val) {
735 	case 0:
736 	case 1:
737 		ret = -ENODEV;
738 		mutex_lock(&event_mutex);
739 		file = event_file_data(filp);
740 		if (likely(file))
741 			ret = ftrace_event_enable_disable(file, val);
742 		mutex_unlock(&event_mutex);
743 		break;
744 
745 	default:
746 		return -EINVAL;
747 	}
748 
749 	*ppos += cnt;
750 
751 	return ret ? ret : cnt;
752 }
753 
754 static ssize_t
755 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
756 		   loff_t *ppos)
757 {
758 	const char set_to_char[4] = { '?', '0', '1', 'X' };
759 	struct ftrace_subsystem_dir *dir = filp->private_data;
760 	struct event_subsystem *system = dir->subsystem;
761 	struct ftrace_event_call *call;
762 	struct ftrace_event_file *file;
763 	struct trace_array *tr = dir->tr;
764 	char buf[2];
765 	int set = 0;
766 	int ret;
767 
768 	mutex_lock(&event_mutex);
769 	list_for_each_entry(file, &tr->events, list) {
770 		call = file->event_call;
771 		if (!call->name || !call->class || !call->class->reg)
772 			continue;
773 
774 		if (system && strcmp(call->class->system, system->name) != 0)
775 			continue;
776 
777 		/*
778 		 * We need to find out if all the events are set
779 	 * or if all events are cleared, or if we have
780 		 * a mixture.
781 		 */
782 		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
783 
784 		/*
785 		 * If we have a mixture, no need to look further.
786 		 */
787 		if (set == 3)
788 			break;
789 	}
790 	mutex_unlock(&event_mutex);
791 
792 	buf[0] = set_to_char[set];
793 	buf[1] = '\n';
794 
795 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
796 
797 	return ret;
798 }
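
/*
 * "set" above is a 2-bit mask: bit 0 means some event is disabled,
 * bit 1 means some event is enabled. set_to_char maps it to '0'
 * (all off), '1' (all on) or 'X' (mixed); '?' means no events matched.
 */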
799 
800 static ssize_t
801 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
802 		    loff_t *ppos)
803 {
804 	struct ftrace_subsystem_dir *dir = filp->private_data;
805 	struct event_subsystem *system = dir->subsystem;
806 	const char *name = NULL;
807 	unsigned long val;
808 	ssize_t ret;
809 
810 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
811 	if (ret)
812 		return ret;
813 
814 	ret = tracing_update_buffers();
815 	if (ret < 0)
816 		return ret;
817 
818 	if (val != 0 && val != 1)
819 		return -EINVAL;
820 
821 	/*
822 	 * Opening of "enable" adds a ref count to system,
823 	 * so the name is safe to use.
824 	 */
825 	if (system)
826 		name = system->name;
827 
828 	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
829 	if (ret)
830 		goto out;
831 
832 	ret = cnt;
833 
834 out:
835 	*ppos += cnt;
836 
837 	return ret;
838 }
839 
840 enum {
841 	FORMAT_HEADER		= 1,
842 	FORMAT_FIELD_SEPERATOR	= 2,
843 	FORMAT_PRINTFMT		= 3,
844 };
845 
846 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
847 {
848 	struct ftrace_event_call *call = event_file_data(m->private);
849 	struct list_head *common_head = &ftrace_common_fields;
850 	struct list_head *head = trace_get_fields(call);
851 	struct list_head *node = v;
852 
853 	(*pos)++;
854 
855 	switch ((unsigned long)v) {
856 	case FORMAT_HEADER:
857 		node = common_head;
858 		break;
859 
860 	case FORMAT_FIELD_SEPERATOR:
861 		node = head;
862 		break;
863 
864 	case FORMAT_PRINTFMT:
865 		/* all done */
866 		return NULL;
867 	}
868 
869 	node = node->prev;
870 	if (node == common_head)
871 		return (void *)FORMAT_FIELD_SEPERATOR;
872 	else if (node == head)
873 		return (void *)FORMAT_PRINTFMT;
874 	else
875 		return node;
876 }
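
/*
 * Fields are list_add()'ed, i.e. stored in reverse definition order,
 * so walking node->prev above emits them in the order they were
 * defined: header, common fields, event fields, then the print fmt.
 */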
877 
878 static int f_show(struct seq_file *m, void *v)
879 {
880 	struct ftrace_event_call *call = event_file_data(m->private);
881 	struct ftrace_event_field *field;
882 	const char *array_descriptor;
883 
884 	switch ((unsigned long)v) {
885 	case FORMAT_HEADER:
886 		seq_printf(m, "name: %s\n", call->name);
887 		seq_printf(m, "ID: %d\n", call->event.type);
888 		seq_printf(m, "format:\n");
889 		return 0;
890 
891 	case FORMAT_FIELD_SEPERATOR:
892 		seq_putc(m, '\n');
893 		return 0;
894 
895 	case FORMAT_PRINTFMT:
896 		seq_printf(m, "\nprint fmt: %s\n",
897 			   call->print_fmt);
898 		return 0;
899 	}
900 
901 	field = list_entry(v, struct ftrace_event_field, link);
902 	/*
903 	 * Smartly shows the array type (except dynamic arrays).
904 	 * Normal:
905 	 *	field:TYPE VAR
906 	 * If TYPE := TYPE[LEN], it is shown:
907 	 *	field:TYPE VAR[LEN]
908 	 */
909 	array_descriptor = strchr(field->type, '[');
910 
911 	if (!strncmp(field->type, "__data_loc", 10))
912 		array_descriptor = NULL;
913 
914 	if (!array_descriptor)
915 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
916 			   field->type, field->name, field->offset,
917 			   field->size, !!field->is_signed);
918 	else
919 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
920 			   (int)(array_descriptor - field->type),
921 			   field->type, field->name,
922 			   array_descriptor, field->offset,
923 			   field->size, !!field->is_signed);
924 
925 	return 0;
926 }
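
/*
 * A field line produced above looks like, for example:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 */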
927 
928 static void *f_start(struct seq_file *m, loff_t *pos)
929 {
930 	void *p = (void *)FORMAT_HEADER;
931 	loff_t l = 0;
932 
933 	/* ->stop() is called even if ->start() fails */
934 	mutex_lock(&event_mutex);
935 	if (!event_file_data(m->private))
936 		return ERR_PTR(-ENODEV);
937 
938 	while (l < *pos && p)
939 		p = f_next(m, p, &l);
940 
941 	return p;
942 }
943 
944 static void f_stop(struct seq_file *m, void *p)
945 {
946 	mutex_unlock(&event_mutex);
947 }
948 
949 static const struct seq_operations trace_format_seq_ops = {
950 	.start		= f_start,
951 	.next		= f_next,
952 	.stop		= f_stop,
953 	.show		= f_show,
954 };
955 
956 static int trace_format_open(struct inode *inode, struct file *file)
957 {
958 	struct seq_file *m;
959 	int ret;
960 
961 	ret = seq_open(file, &trace_format_seq_ops);
962 	if (ret < 0)
963 		return ret;
964 
965 	m = file->private_data;
966 	m->private = file;
967 
968 	return 0;
969 }
970 
971 static ssize_t
972 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
973 {
974 	int id = (long)event_file_data(filp);
975 	char buf[32];
976 	int len;
977 
978 	if (*ppos)
979 		return 0;
980 
981 	if (unlikely(!id))
982 		return -ENODEV;
983 
984 	len = sprintf(buf, "%d\n", id);
985 
986 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
987 }
988 
989 static ssize_t
990 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
991 		  loff_t *ppos)
992 {
993 	struct ftrace_event_file *file;
994 	struct trace_seq *s;
995 	int r = -ENODEV;
996 
997 	if (*ppos)
998 		return 0;
999 
1000 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1001 
1002 	if (!s)
1003 		return -ENOMEM;
1004 
1005 	trace_seq_init(s);
1006 
1007 	mutex_lock(&event_mutex);
1008 	file = event_file_data(filp);
1009 	if (file)
1010 		print_event_filter(file, s);
1011 	mutex_unlock(&event_mutex);
1012 
1013 	if (file)
1014 		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1015 
1016 	kfree(s);
1017 
1018 	return r;
1019 }
1020 
1021 static ssize_t
1022 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1023 		   loff_t *ppos)
1024 {
1025 	struct ftrace_event_file *file;
1026 	char *buf;
1027 	int err = -ENODEV;
1028 
1029 	if (cnt >= PAGE_SIZE)
1030 		return -EINVAL;
1031 
1032 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1033 	if (!buf)
1034 		return -ENOMEM;
1035 
1036 	if (copy_from_user(buf, ubuf, cnt)) {
1037 		free_page((unsigned long) buf);
1038 		return -EFAULT;
1039 	}
1040 	buf[cnt] = '\0';
1041 
1042 	mutex_lock(&event_mutex);
1043 	file = event_file_data(filp);
1044 	if (file)
1045 		err = apply_event_filter(file, buf);
1046 	mutex_unlock(&event_mutex);
1047 
1048 	free_page((unsigned long) buf);
1049 	if (err < 0)
1050 		return err;
1051 
1052 	*ppos += cnt;
1053 
1054 	return cnt;
1055 }
1056 
1057 static LIST_HEAD(event_subsystems);
1058 
1059 static int subsystem_open(struct inode *inode, struct file *filp)
1060 {
1061 	struct event_subsystem *system = NULL;
1062 	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
1063 	struct trace_array *tr;
1064 	int ret;
1065 
1066 	if (tracing_is_disabled())
1067 		return -ENODEV;
1068 
1069 	/* Make sure the system still exists */
1070 	mutex_lock(&trace_types_lock);
1071 	mutex_lock(&event_mutex);
1072 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1073 		list_for_each_entry(dir, &tr->systems, list) {
1074 			if (dir == inode->i_private) {
1075 				/* Don't open systems with no events */
1076 				if (dir->nr_events) {
1077 					__get_system_dir(dir);
1078 					system = dir->subsystem;
1079 				}
1080 				goto exit_loop;
1081 			}
1082 		}
1083 	}
1084  exit_loop:
1085 	mutex_unlock(&event_mutex);
1086 	mutex_unlock(&trace_types_lock);
1087 
1088 	if (!system)
1089 		return -ENODEV;
1090 
1091 	/* Some versions of gcc think dir can be uninitialized here */
1092 	WARN_ON(!dir);
1093 
1094 	/* Still need to increment the ref count of the system */
1095 	if (trace_array_get(tr) < 0) {
1096 		put_system(dir);
1097 		return -ENODEV;
1098 	}
1099 
1100 	ret = tracing_open_generic(inode, filp);
1101 	if (ret < 0) {
1102 		trace_array_put(tr);
1103 		put_system(dir);
1104 	}
1105 
1106 	return ret;
1107 }
1108 
1109 static int system_tr_open(struct inode *inode, struct file *filp)
1110 {
1111 	struct ftrace_subsystem_dir *dir;
1112 	struct trace_array *tr = inode->i_private;
1113 	int ret;
1114 
1115 	if (tracing_is_disabled())
1116 		return -ENODEV;
1117 
1118 	if (trace_array_get(tr) < 0)
1119 		return -ENODEV;
1120 
1121 	/* Make a temporary dir that has no system but points to tr */
1122 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
1123 	if (!dir) {
1124 		trace_array_put(tr);
1125 		return -ENOMEM;
1126 	}
1127 
1128 	dir->tr = tr;
1129 
1130 	ret = tracing_open_generic(inode, filp);
1131 	if (ret < 0) {
1132 		trace_array_put(tr);
1133 		kfree(dir);
1134 		return ret;
1135 	}
1136 
1137 	filp->private_data = dir;
1138 
1139 	return 0;
1140 }
1141 
1142 static int subsystem_release(struct inode *inode, struct file *file)
1143 {
1144 	struct ftrace_subsystem_dir *dir = file->private_data;
1145 
1146 	trace_array_put(dir->tr);
1147 
1148 	/*
1149 	 * If dir->subsystem is NULL, then this is a temporary
1150 	 * descriptor that was made for a trace_array to enable
1151 	 * all subsystems.
1152 	 */
1153 	if (dir->subsystem)
1154 		put_system(dir);
1155 	else
1156 		kfree(dir);
1157 
1158 	return 0;
1159 }
1160 
1161 static ssize_t
1162 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1163 		      loff_t *ppos)
1164 {
1165 	struct ftrace_subsystem_dir *dir = filp->private_data;
1166 	struct event_subsystem *system = dir->subsystem;
1167 	struct trace_seq *s;
1168 	int r;
1169 
1170 	if (*ppos)
1171 		return 0;
1172 
1173 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1174 	if (!s)
1175 		return -ENOMEM;
1176 
1177 	trace_seq_init(s);
1178 
1179 	print_subsystem_event_filter(system, s);
1180 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1181 
1182 	kfree(s);
1183 
1184 	return r;
1185 }
1186 
1187 static ssize_t
1188 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1189 		       loff_t *ppos)
1190 {
1191 	struct ftrace_subsystem_dir *dir = filp->private_data;
1192 	char *buf;
1193 	int err;
1194 
1195 	if (cnt >= PAGE_SIZE)
1196 		return -EINVAL;
1197 
1198 	buf = (char *)__get_free_page(GFP_TEMPORARY);
1199 	if (!buf)
1200 		return -ENOMEM;
1201 
1202 	if (copy_from_user(buf, ubuf, cnt)) {
1203 		free_page((unsigned long) buf);
1204 		return -EFAULT;
1205 	}
1206 	buf[cnt] = '\0';
1207 
1208 	err = apply_subsystem_event_filter(dir, buf);
1209 	free_page((unsigned long) buf);
1210 	if (err < 0)
1211 		return err;
1212 
1213 	*ppos += cnt;
1214 
1215 	return cnt;
1216 }
1217 
1218 static ssize_t
1219 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1220 {
1221 	int (*func)(struct trace_seq *s) = filp->private_data;
1222 	struct trace_seq *s;
1223 	int r;
1224 
1225 	if (*ppos)
1226 		return 0;
1227 
1228 	s = kmalloc(sizeof(*s), GFP_KERNEL);
1229 	if (!s)
1230 		return -ENOMEM;
1231 
1232 	trace_seq_init(s);
1233 
1234 	func(s);
1235 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1236 
1237 	kfree(s);
1238 
1239 	return r;
1240 }
1241 
1242 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1243 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1244 static int ftrace_event_release(struct inode *inode, struct file *file);
1245 
1246 static const struct seq_operations show_event_seq_ops = {
1247 	.start = t_start,
1248 	.next = t_next,
1249 	.show = t_show,
1250 	.stop = t_stop,
1251 };
1252 
1253 static const struct seq_operations show_set_event_seq_ops = {
1254 	.start = s_start,
1255 	.next = s_next,
1256 	.show = t_show,
1257 	.stop = t_stop,
1258 };
1259 
1260 static const struct file_operations ftrace_avail_fops = {
1261 	.open = ftrace_event_avail_open,
1262 	.read = seq_read,
1263 	.llseek = seq_lseek,
1264 	.release = seq_release,
1265 };
1266 
1267 static const struct file_operations ftrace_set_event_fops = {
1268 	.open = ftrace_event_set_open,
1269 	.read = seq_read,
1270 	.write = ftrace_event_write,
1271 	.llseek = seq_lseek,
1272 	.release = ftrace_event_release,
1273 };
1274 
1275 static const struct file_operations ftrace_enable_fops = {
1276 	.open = tracing_open_generic,
1277 	.read = event_enable_read,
1278 	.write = event_enable_write,
1279 	.llseek = default_llseek,
1280 };
1281 
1282 static const struct file_operations ftrace_event_format_fops = {
1283 	.open = trace_format_open,
1284 	.read = seq_read,
1285 	.llseek = seq_lseek,
1286 	.release = seq_release,
1287 };
1288 
1289 static const struct file_operations ftrace_event_id_fops = {
1290 	.read = event_id_read,
1291 	.llseek = default_llseek,
1292 };
1293 
1294 static const struct file_operations ftrace_event_filter_fops = {
1295 	.open = tracing_open_generic,
1296 	.read = event_filter_read,
1297 	.write = event_filter_write,
1298 	.llseek = default_llseek,
1299 };
1300 
1301 static const struct file_operations ftrace_subsystem_filter_fops = {
1302 	.open = subsystem_open,
1303 	.read = subsystem_filter_read,
1304 	.write = subsystem_filter_write,
1305 	.llseek = default_llseek,
1306 	.release = subsystem_release,
1307 };
1308 
1309 static const struct file_operations ftrace_system_enable_fops = {
1310 	.open = subsystem_open,
1311 	.read = system_enable_read,
1312 	.write = system_enable_write,
1313 	.llseek = default_llseek,
1314 	.release = subsystem_release,
1315 };
1316 
1317 static const struct file_operations ftrace_tr_enable_fops = {
1318 	.open = system_tr_open,
1319 	.read = system_enable_read,
1320 	.write = system_enable_write,
1321 	.llseek = default_llseek,
1322 	.release = subsystem_release,
1323 };
1324 
1325 static const struct file_operations ftrace_show_header_fops = {
1326 	.open = tracing_open_generic,
1327 	.read = show_header,
1328 	.llseek = default_llseek,
1329 };
1330 
1331 static int
1332 ftrace_event_open(struct inode *inode, struct file *file,
1333 		  const struct seq_operations *seq_ops)
1334 {
1335 	struct seq_file *m;
1336 	int ret;
1337 
1338 	ret = seq_open(file, seq_ops);
1339 	if (ret < 0)
1340 		return ret;
1341 	m = file->private_data;
1342 	/* copy tr over to seq ops */
1343 	m->private = inode->i_private;
1344 
1345 	return ret;
1346 }
1347 
1348 static int ftrace_event_release(struct inode *inode, struct file *file)
1349 {
1350 	struct trace_array *tr = inode->i_private;
1351 
1352 	trace_array_put(tr);
1353 
1354 	return seq_release(inode, file);
1355 }
1356 
1357 static int
1358 ftrace_event_avail_open(struct inode *inode, struct file *file)
1359 {
1360 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1361 
1362 	return ftrace_event_open(inode, file, seq_ops);
1363 }
1364 
1365 static int
1366 ftrace_event_set_open(struct inode *inode, struct file *file)
1367 {
1368 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1369 	struct trace_array *tr = inode->i_private;
1370 	int ret;
1371 
1372 	if (trace_array_get(tr) < 0)
1373 		return -ENODEV;
1374 
1375 	if ((file->f_mode & FMODE_WRITE) &&
1376 	    (file->f_flags & O_TRUNC))
1377 		ftrace_clear_events(tr);
1378 
1379 	ret = ftrace_event_open(inode, file, seq_ops);
1380 	if (ret < 0)
1381 		trace_array_put(tr);
1382 	return ret;
1383 }
1384 
1385 static struct event_subsystem *
1386 create_new_subsystem(const char *name)
1387 {
1388 	struct event_subsystem *system;
1389 
1390 	/* need to create new entry */
1391 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1392 	if (!system)
1393 		return NULL;
1394 
1395 	system->ref_count = 1;
1396 
1397 	/* Only allocate if dynamic (kprobes and modules) */
1398 	if (!core_kernel_data((unsigned long)name)) {
1399 		system->ref_count |= SYSTEM_FL_FREE_NAME;
1400 		system->name = kstrdup(name, GFP_KERNEL);
1401 		if (!system->name)
1402 			goto out_free;
1403 	} else
1404 		system->name = name;
1405 
1406 	system->filter = NULL;
1407 
1408 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1409 	if (!system->filter)
1410 		goto out_free;
1411 
1412 	list_add(&system->list, &event_subsystems);
1413 
1414 	return system;
1415 
1416  out_free:
1417 	if (system->ref_count & SYSTEM_FL_FREE_NAME)
1418 		kfree(system->name);
1419 	kfree(system);
1420 	return NULL;
1421 }
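
/*
 * The core_kernel_data() test above distinguishes names that live in
 * the kernel image (always valid, safe to reference directly) from
 * names owned by modules or kprobes, which may vanish and so must be
 * duplicated and later freed via SYSTEM_FL_FREE_NAME.
 */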
1422 
1423 static struct dentry *
1424 event_subsystem_dir(struct trace_array *tr, const char *name,
1425 		    struct ftrace_event_file *file, struct dentry *parent)
1426 {
1427 	struct ftrace_subsystem_dir *dir;
1428 	struct event_subsystem *system;
1429 	struct dentry *entry;
1430 
1431 	/* First see if we did not already create this dir */
1432 	list_for_each_entry(dir, &tr->systems, list) {
1433 		system = dir->subsystem;
1434 		if (strcmp(system->name, name) == 0) {
1435 			dir->nr_events++;
1436 			file->system = dir;
1437 			return dir->entry;
1438 		}
1439 	}
1440 
1441 	/* Now see if the system itself exists. */
1442 	list_for_each_entry(system, &event_subsystems, list) {
1443 		if (strcmp(system->name, name) == 0)
1444 			break;
1445 	}
1446 	/* Reset system variable when not found */
1447 	if (&system->list == &event_subsystems)
1448 		system = NULL;
1449 
1450 	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1451 	if (!dir)
1452 		goto out_fail;
1453 
1454 	if (!system) {
1455 		system = create_new_subsystem(name);
1456 		if (!system)
1457 			goto out_free;
1458 	} else
1459 		__get_system(system);
1460 
1461 	dir->entry = debugfs_create_dir(name, parent);
1462 	if (!dir->entry) {
1463 		pr_warning("Failed to create system directory %s\n", name);
1464 		__put_system(system);
1465 		goto out_free;
1466 	}
1467 
1468 	dir->tr = tr;
1469 	dir->ref_count = 1;
1470 	dir->nr_events = 1;
1471 	dir->subsystem = system;
1472 	file->system = dir;
1473 
1474 	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
1475 				    &ftrace_subsystem_filter_fops);
1476 	if (!entry) {
1477 		kfree(system->filter);
1478 		system->filter = NULL;
1479 		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
1480 	}
1481 
1482 	trace_create_file("enable", 0644, dir->entry, dir,
1483 			  &ftrace_system_enable_fops);
1484 
1485 	list_add(&dir->list, &tr->systems);
1486 
1487 	return dir->entry;
1488 
1489  out_free:
1490 	kfree(dir);
1491  out_fail:
1492 	/* Only print this message if failed on memory allocation */
1493 	if (!dir || !system)
1494 		pr_warning("No memory to create event subsystem %s\n",
1495 			   name);
1496 	return NULL;
1497 }
1498 
1499 static int
1500 event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
1501 {
1502 	struct ftrace_event_call *call = file->event_call;
1503 	struct trace_array *tr = file->tr;
1504 	struct list_head *head;
1505 	struct dentry *d_events;
1506 	int ret;
1507 
1508 	/*
1509 	 * If the trace point header did not define TRACE_SYSTEM
1510 	 * then the system would be called "TRACE_SYSTEM".
1511 	 */
1512 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1513 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1514 		if (!d_events)
1515 			return -ENOMEM;
1516 	} else
1517 		d_events = parent;
1518 
1519 	file->dir = debugfs_create_dir(call->name, d_events);
1520 	if (!file->dir) {
1521 		pr_warning("Could not create debugfs '%s' directory\n",
1522 			   call->name);
1523 		return -1;
1524 	}
1525 
1526 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1527 		trace_create_file("enable", 0644, file->dir, file,
1528 				  &ftrace_enable_fops);
1529 
1530 #ifdef CONFIG_PERF_EVENTS
1531 	if (call->event.type && call->class->reg)
1532 		trace_create_file("id", 0444, file->dir,
1533 				  (void *)(long)call->event.type,
1534 				  &ftrace_event_id_fops);
1535 #endif
1536 
1537 	/*
1538 	 * Other events may have the same class. Only update
1539 	 * the fields if they are not already defined.
1540 	 */
1541 	head = trace_get_fields(call);
1542 	if (list_empty(head)) {
1543 		ret = call->class->define_fields(call);
1544 		if (ret < 0) {
1545 			pr_warning("Could not initialize trace point"
1546 				   " events/%s\n", call->name);
1547 			return -1;
1548 		}
1549 	}
1550 	trace_create_file("filter", 0644, file->dir, file,
1551 			  &ftrace_event_filter_fops);
1552 
1553 	trace_create_file("trigger", 0644, file->dir, file,
1554 			  &event_trigger_fops);
1555 
1556 	trace_create_file("format", 0444, file->dir, call,
1557 			  &ftrace_event_format_fops);
1558 
1559 	return 0;
1560 }
1561 
1562 static void remove_event_from_tracers(struct ftrace_event_call *call)
1563 {
1564 	struct ftrace_event_file *file;
1565 	struct trace_array *tr;
1566 
1567 	do_for_each_event_file_safe(tr, file) {
1568 		if (file->event_call != call)
1569 			continue;
1570 
1571 		remove_event_file_dir(file);
1572 		/*
1573 		 * The do_for_each_event_file_safe() is
1574 		 * a double loop. After finding the call for this
1575 		 * trace_array, we use break to jump to the next
1576 		 * trace_array.
1577 		 */
1578 		break;
1579 	} while_for_each_event_file();
1580 }
1581 
1582 static void event_remove(struct ftrace_event_call *call)
1583 {
1584 	struct trace_array *tr;
1585 	struct ftrace_event_file *file;
1586 
1587 	do_for_each_event_file(tr, file) {
1588 		if (file->event_call != call)
1589 			continue;
1590 		ftrace_event_enable_disable(file, 0);
1591 		destroy_preds(file);
1592 		/*
1593 		 * The do_for_each_event_file() is
1594 		 * a double loop. After finding the call for this
1595 		 * trace_array, we use break to jump to the next
1596 		 * trace_array.
1597 		 */
1598 		break;
1599 	} while_for_each_event_file();
1600 
1601 	if (call->event.funcs)
1602 		__unregister_ftrace_event(&call->event);
1603 	remove_event_from_tracers(call);
1604 	list_del(&call->list);
1605 }
1606 
1607 static int event_init(struct ftrace_event_call *call)
1608 {
1609 	int ret = 0;
1610 
1611 	if (WARN_ON(!call->name))
1612 		return -EINVAL;
1613 
1614 	if (call->class->raw_init) {
1615 		ret = call->class->raw_init(call);
1616 		if (ret < 0 && ret != -ENOSYS)
1617 			pr_warn("Could not initialize trace events/%s\n",
1618 				call->name);
1619 	}
1620 
1621 	return ret;
1622 }
1623 
1624 static int
1625 __register_event(struct ftrace_event_call *call, struct module *mod)
1626 {
1627 	int ret;
1628 
1629 	ret = event_init(call);
1630 	if (ret < 0)
1631 		return ret;
1632 
1633 	list_add(&call->list, &ftrace_events);
1634 	call->mod = mod;
1635 
1636 	return 0;
1637 }
1638 
1639 static struct ftrace_event_file *
1640 trace_create_new_event(struct ftrace_event_call *call,
1641 		       struct trace_array *tr)
1642 {
1643 	struct ftrace_event_file *file;
1644 
1645 	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1646 	if (!file)
1647 		return NULL;
1648 
1649 	file->event_call = call;
1650 	file->tr = tr;
1651 	atomic_set(&file->sm_ref, 0);
1652 	atomic_set(&file->tm_ref, 0);
1653 	INIT_LIST_HEAD(&file->triggers);
1654 	list_add(&file->list, &tr->events);
1655 
1656 	return file;
1657 }
1658 
1659 /* Add an event to a trace directory */
1660 static int
1661 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
1662 {
1663 	struct ftrace_event_file *file;
1664 
1665 	file = trace_create_new_event(call, tr);
1666 	if (!file)
1667 		return -ENOMEM;
1668 
1669 	return event_create_dir(tr->event_dir, file);
1670 }
1671 
1672 /*
1673  * Just create a descriptor for early init. A descriptor is required
1674  * for enabling events at boot. We want to enable events before
1675  * the filesystem is initialized.
1676  */
1677 static __init int
1678 __trace_early_add_new_event(struct ftrace_event_call *call,
1679 			    struct trace_array *tr)
1680 {
1681 	struct ftrace_event_file *file;
1682 
1683 	file = trace_create_new_event(call, tr);
1684 	if (!file)
1685 		return -ENOMEM;
1686 
1687 	return 0;
1688 }
1689 
1690 struct ftrace_module_file_ops;
1691 static void __add_event_to_tracers(struct ftrace_event_call *call);
1692 
1693 /* Add an additional event_call dynamically */
1694 int trace_add_event_call(struct ftrace_event_call *call)
1695 {
1696 	int ret;
1697 	mutex_lock(&trace_types_lock);
1698 	mutex_lock(&event_mutex);
1699 
1700 	ret = __register_event(call, NULL);
1701 	if (ret >= 0)
1702 		__add_event_to_tracers(call);
1703 
1704 	mutex_unlock(&event_mutex);
1705 	mutex_unlock(&trace_types_lock);
1706 	return ret;
1707 }
1708 
1709 /*
1710  * Must be called under locking of trace_types_lock, event_mutex and
1711  * trace_event_sem.
1712  */
1713 static void __trace_remove_event_call(struct ftrace_event_call *call)
1714 {
1715 	event_remove(call);
1716 	trace_destroy_fields(call);
1717 	destroy_call_preds(call);
1718 }
1719 
1720 static int probe_remove_event_call(struct ftrace_event_call *call)
1721 {
1722 	struct trace_array *tr;
1723 	struct ftrace_event_file *file;
1724 
1725 #ifdef CONFIG_PERF_EVENTS
1726 	if (call->perf_refcount)
1727 		return -EBUSY;
1728 #endif
1729 	do_for_each_event_file(tr, file) {
1730 		if (file->event_call != call)
1731 			continue;
1732 		/*
1733 		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
1734 		 * call we are about to make; FTRACE_EVENT_FL_SOFT_MODE can
1735 		 * suppress TRACE_REG_UNREGISTER.
1736 		 */
1737 		if (file->flags & FTRACE_EVENT_FL_ENABLED)
1738 			return -EBUSY;
1739 		/*
1740 		 * The do_for_each_event_file() is
1741 		 * a double loop. After finding the call for this
1742 		 * trace_array, we use break to jump to the next
1743 		 * trace_array.
1744 		 */
1745 		break;
1746 	} while_for_each_event_file();
1747 
1748 	__trace_remove_event_call(call);
1749 
1750 	return 0;
1751 }
1752 
1753 /* Remove an event_call */
1754 int trace_remove_event_call(struct ftrace_event_call *call)
1755 {
1756 	int ret;
1757 
1758 	mutex_lock(&trace_types_lock);
1759 	mutex_lock(&event_mutex);
1760 	down_write(&trace_event_sem);
1761 	ret = probe_remove_event_call(call);
1762 	up_write(&trace_event_sem);
1763 	mutex_unlock(&event_mutex);
1764 	mutex_unlock(&trace_types_lock);
1765 
1766 	return ret;
1767 }
1768 
1769 #define for_each_event(event, start, end)			\
1770 	for (event = start;					\
1771 	     (unsigned long)event < (unsigned long)end;		\
1772 	     event++)
1773 
1774 #ifdef CONFIG_MODULES
1775 
1776 static void trace_module_add_events(struct module *mod)
1777 {
1778 	struct ftrace_event_call **call, **start, **end;
1779 
1780 	start = mod->trace_events;
1781 	end = mod->trace_events + mod->num_trace_events;
1782 
1783 	for_each_event(call, start, end) {
1784 		__register_event(*call, mod);
1785 		__add_event_to_tracers(*call);
1786 	}
1787 }
1788 
1789 static void trace_module_remove_events(struct module *mod)
1790 {
1791 	struct ftrace_event_call *call, *p;
1792 	bool clear_trace = false;
1793 
1794 	down_write(&trace_event_sem);
1795 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1796 		if (call->mod == mod) {
1797 			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
1798 				clear_trace = true;
1799 			__trace_remove_event_call(call);
1800 		}
1801 	}
1802 	up_write(&trace_event_sem);
1803 
1804 	/*
1805 	 * It is safest to reset the ring buffer if the module being unloaded
1806 	 * registered any events that were used. The only worry is if
1807 	 * a new module gets loaded, and takes on the same id as the events
1808 	 * of this module. When printing out the buffer, traced events left
1809 	 * over from this module may be passed to the new module events and
1810 	 * unexpected results may occur.
1811 	 */
1812 	if (clear_trace)
1813 		tracing_reset_all_online_cpus();
1814 }
1815 
1816 static int trace_module_notify(struct notifier_block *self,
1817 			       unsigned long val, void *data)
1818 {
1819 	struct module *mod = data;
1820 
1821 	mutex_lock(&trace_types_lock);
1822 	mutex_lock(&event_mutex);
1823 	switch (val) {
1824 	case MODULE_STATE_COMING:
1825 		trace_module_add_events(mod);
1826 		break;
1827 	case MODULE_STATE_GOING:
1828 		trace_module_remove_events(mod);
1829 		break;
1830 	}
1831 	mutex_unlock(&event_mutex);
1832 	mutex_unlock(&trace_types_lock);
1833 
1834 	return 0;
1835 }
1836 
1837 static struct notifier_block trace_module_nb = {
1838 	.notifier_call = trace_module_notify,
1839 	.priority = 0,
1840 };
1841 #endif /* CONFIG_MODULES */
1842 
1843 /* Create a new event directory structure for a trace directory. */
1844 static void
1845 __trace_add_event_dirs(struct trace_array *tr)
1846 {
1847 	struct ftrace_event_call *call;
1848 	int ret;
1849 
1850 	list_for_each_entry(call, &ftrace_events, list) {
1851 		ret = __trace_add_new_event(call, tr);
1852 		if (ret < 0)
1853 			pr_warning("Could not create directory for event %s\n",
1854 				   call->name);
1855 	}
1856 }
1857 
1858 struct ftrace_event_file *
1859 find_event_file(struct trace_array *tr, const char *system,  const char *event)
1860 {
1861 	struct ftrace_event_file *file;
1862 	struct ftrace_event_call *call;
1863 
1864 	list_for_each_entry(file, &tr->events, list) {
1865 
1866 		call = file->event_call;
1867 
1868 		if (!call->name || !call->class || !call->class->reg)
1869 			continue;
1870 
1871 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1872 			continue;
1873 
1874 		if (strcmp(event, call->name) == 0 &&
1875 		    strcmp(system, call->class->system) == 0)
1876 			return file;
1877 	}
1878 	return NULL;
1879 }
1880 
1881 #ifdef CONFIG_DYNAMIC_FTRACE
1882 
1883 /* Avoid typos */
1884 #define ENABLE_EVENT_STR	"enable_event"
1885 #define DISABLE_EVENT_STR	"disable_event"
1886 
1887 struct event_probe_data {
1888 	struct ftrace_event_file	*file;
1889 	unsigned long			count;
1890 	int				ref;
1891 	bool				enable;
1892 };
1893 
1894 static void
1895 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1896 {
1897 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1898 	struct event_probe_data *data = *pdata;
1899 
1900 	if (!data)
1901 		return;
1902 
1903 	if (data->enable)
1904 		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1905 	else
1906 		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1907 }
1908 
1909 static void
1910 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1911 {
1912 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1913 	struct event_probe_data *data = *pdata;
1914 
1915 	if (!data)
1916 		return;
1917 
1918 	if (!data->count)
1919 		return;
1920 
1921 	/* Skip if the event is already in the state we want to switch to */
1922 	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
1923 		return;
1924 
1925 	if (data->count != -1)
1926 		(data->count)--;
1927 
1928 	event_enable_probe(ip, parent_ip, _data);
1929 }
1930 
1931 static int
1932 event_enable_print(struct seq_file *m, unsigned long ip,
1933 		      struct ftrace_probe_ops *ops, void *_data)
1934 {
1935 	struct event_probe_data *data = _data;
1936 
1937 	seq_printf(m, "%ps:", (void *)ip);
1938 
1939 	seq_printf(m, "%s:%s:%s",
1940 		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1941 		   data->file->event_call->class->system,
1942 		   data->file->event_call->name);
1943 
1944 	if (data->count == -1)
1945 		seq_printf(m, ":unlimited\n");
1946 	else
1947 		seq_printf(m, ":count=%ld\n", data->count);
1948 
1949 	return 0;
1950 }
1951 
1952 static int
1953 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1954 		  void **_data)
1955 {
1956 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1957 	struct event_probe_data *data = *pdata;
1958 
1959 	data->ref++;
1960 	return 0;
1961 }
1962 
1963 static void
1964 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
1965 		  void **_data)
1966 {
1967 	struct event_probe_data **pdata = (struct event_probe_data **)_data;
1968 	struct event_probe_data *data = *pdata;
1969 
1970 	if (WARN_ON_ONCE(data->ref <= 0))
1971 		return;
1972 
1973 	data->ref--;
1974 	if (!data->ref) {
1975 		/* Remove the SOFT_MODE flag */
1976 		__ftrace_event_enable_disable(data->file, 0, 1);
1977 		module_put(data->file->event_call->mod);
1978 		kfree(data);
1979 	}
1980 	*pdata = NULL;
1981 }
1982 
1983 static struct ftrace_probe_ops event_enable_probe_ops = {
1984 	.func			= event_enable_probe,
1985 	.print			= event_enable_print,
1986 	.init			= event_enable_init,
1987 	.free			= event_enable_free,
1988 };
1989 
1990 static struct ftrace_probe_ops event_enable_count_probe_ops = {
1991 	.func			= event_enable_count_probe,
1992 	.print			= event_enable_print,
1993 	.init			= event_enable_init,
1994 	.free			= event_enable_free,
1995 };
1996 
1997 static struct ftrace_probe_ops event_disable_probe_ops = {
1998 	.func			= event_enable_probe,
1999 	.print			= event_enable_print,
2000 	.init			= event_enable_init,
2001 	.free			= event_enable_free,
2002 };
2003 
2004 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2005 	.func			= event_enable_count_probe,
2006 	.print			= event_enable_print,
2007 	.init			= event_enable_init,
2008 	.free			= event_enable_free,
2009 };
2010 
2011 static int
2012 event_enable_func(struct ftrace_hash *hash,
2013 		  char *glob, char *cmd, char *param, int enabled)
2014 {
2015 	struct trace_array *tr = top_trace_array();
2016 	struct ftrace_event_file *file;
2017 	struct ftrace_probe_ops *ops;
2018 	struct event_probe_data *data;
2019 	const char *system;
2020 	const char *event;
2021 	char *number;
2022 	bool enable;
2023 	int ret;
2024 
2025 	/* hash funcs only work with set_ftrace_filter */
2026 	if (!enabled || !param)
2027 		return -EINVAL;
2028 
2029 	system = strsep(&param, ":");
2030 	if (!param)
2031 		return -EINVAL;
2032 
2033 	event = strsep(&param, ":");
2034 
2035 	mutex_lock(&event_mutex);
2036 
2037 	ret = -EINVAL;
2038 	file = find_event_file(tr, system, event);
2039 	if (!file)
2040 		goto out;
2041 
2042 	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2043 
2044 	if (enable)
2045 		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2046 	else
2047 		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2048 
2049 	if (glob[0] == '!') {
2050 		unregister_ftrace_function_probe_func(glob+1, ops);
2051 		ret = 0;
2052 		goto out;
2053 	}
2054 
2055 	ret = -ENOMEM;
2056 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2057 	if (!data)
2058 		goto out;
2059 
2060 	data->enable = enable;
2061 	data->count = -1;
2062 	data->file = file;
2063 
2064 	if (!param)
2065 		goto out_reg;
2066 
2067 	number = strsep(&param, ":");
2068 
2069 	ret = -EINVAL;
2070 	if (!strlen(number))
2071 		goto out_free;
2072 
2073 	/*
2074 	 * We use the callback data field (which is a pointer)
2075 	 * as our counter.
2076 	 */
2077 	ret = kstrtoul(number, 0, &data->count);
2078 	if (ret)
2079 		goto out_free;
2080 
2081  out_reg:
2082 	/* Don't let event modules unload while probe registered */
2083 	ret = try_module_get(file->event_call->mod);
2084 	if (!ret) {
2085 		ret = -EBUSY;
2086 		goto out_free;
2087 	}
2088 
2089 	ret = __ftrace_event_enable_disable(file, 1, 1);
2090 	if (ret < 0)
2091 		goto out_put;
2092 	ret = register_ftrace_function_probe(glob, ops, data);
2093 	/*
2094 	 * On success, the above returns the number of functions enabled,
2095 	 * but if it didn't find any functions it returns zero.
2096 	 * Consider no functions a failure too.
2097 	 */
2098 	if (!ret) {
2099 		ret = -ENOENT;
2100 		goto out_disable;
2101 	} else if (ret < 0)
2102 		goto out_disable;
2103 	/* Just return zero, not the number of enabled functions */
2104 	ret = 0;
2105  out:
2106 	mutex_unlock(&event_mutex);
2107 	return ret;
2108 
2109  out_disable:
2110 	__ftrace_event_enable_disable(file, 0, 1);
2111  out_put:
2112 	module_put(file->event_call->mod);
2113  out_free:
2114 	kfree(data);
2115 	goto out;
2116 }
2117 
2118 static struct ftrace_func_command event_enable_cmd = {
2119 	.name			= ENABLE_EVENT_STR,
2120 	.func			= event_enable_func,
2121 };
2122 
2123 static struct ftrace_func_command event_disable_cmd = {
2124 	.name			= DISABLE_EVENT_STR,
2125 	.func			= event_enable_func,
2126 };
2127 
2128 static __init int register_event_cmds(void)
2129 {
2130 	int ret;
2131 
2132 	ret = register_ftrace_command(&event_enable_cmd);
2133 	if (WARN_ON(ret < 0))
2134 		return ret;
2135 	ret = register_ftrace_command(&event_disable_cmd);
2136 	if (WARN_ON(ret < 0))
2137 		unregister_ftrace_command(&event_enable_cmd);
2138 	return ret;
2139 }
2140 #else
2141 static inline int register_event_cmds(void) { return 0; }
2142 #endif /* CONFIG_DYNAMIC_FTRACE */
2143 
2144 /*
2145  * The top level array has already had its ftrace_event_file
2146  * descriptors created in order to allow for early events to
2147  * be recorded. This function is called after the debugfs has been
2148  * initialized, and we now have to create the files associated
 * with the events.
2150  */
2151 static __init void
2152 __trace_early_add_event_dirs(struct trace_array *tr)
2153 {
2154 	struct ftrace_event_file *file;
2155 	int ret;
2156 
2158 	list_for_each_entry(file, &tr->events, list) {
2159 		ret = event_create_dir(tr->event_dir, file);
2160 		if (ret < 0)
2161 			pr_warning("Could not create directory for event %s\n",
2162 				   file->event_call->name);
2163 	}
2164 }
2165 
2166 /*
 * For early boot up, the top trace array needs to have
2168  * a list of events that can be enabled. This must be done before
2169  * the filesystem is set up in order to allow events to be traced
2170  * early.
2171  */
2172 static __init void
2173 __trace_early_add_events(struct trace_array *tr)
2174 {
2175 	struct ftrace_event_call *call;
2176 	int ret;
2177 
2178 	list_for_each_entry(call, &ftrace_events, list) {
2179 		/* Early boot up should not have any modules loaded */
2180 		if (WARN_ON_ONCE(call->mod))
2181 			continue;
2182 
2183 		ret = __trace_early_add_new_event(call, tr);
2184 		if (ret < 0)
2185 			pr_warning("Could not create early event %s\n",
2186 				   call->name);
2187 	}
2188 }
2189 
2190 /* Remove the event directory structure for a trace directory. */
2191 static void
2192 __trace_remove_event_dirs(struct trace_array *tr)
2193 {
2194 	struct ftrace_event_file *file, *next;
2195 
2196 	list_for_each_entry_safe(file, next, &tr->events, list)
2197 		remove_event_file_dir(file);
2198 }
2199 
2200 static void __add_event_to_tracers(struct ftrace_event_call *call)
2201 {
2202 	struct trace_array *tr;
2203 
2204 	list_for_each_entry(tr, &ftrace_trace_arrays, list)
2205 		__trace_add_new_event(call, tr);
2206 }
2207 
2208 extern struct ftrace_event_call *__start_ftrace_events[];
2209 extern struct ftrace_event_call *__stop_ftrace_events[];
2210 
2211 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2212 
2213 static __init int setup_trace_event(char *str)
2214 {
2215 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2216 	ring_buffer_expanded = true;
2217 	tracing_selftest_disabled = true;
2218 
2219 	return 1;
2220 }
2221 __setup("trace_event=", setup_trace_event);
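/*
 * As an illustrative sketch (the event names are examples only),
 * booting with:
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * fills bootup_event_buf, and event_trace_enable() below walks the
 * comma-separated list to turn those events on before the debugfs
 * files exist.
 */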
2222 
2223 /* Expects to have event_mutex held when called */
2224 static int
2225 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2226 {
2227 	struct dentry *d_events;
2228 	struct dentry *entry;
2229 
2230 	entry = debugfs_create_file("set_event", 0644, parent,
2231 				    tr, &ftrace_set_event_fops);
2232 	if (!entry) {
2233 		pr_warning("Could not create debugfs 'set_event' entry\n");
2234 		return -ENOMEM;
2235 	}
2236 
2237 	d_events = debugfs_create_dir("events", parent);
2238 	if (!d_events) {
2239 		pr_warning("Could not create debugfs 'events' directory\n");
2240 		return -ENOMEM;
2241 	}
2242 
2243 	/* ring buffer internal formats */
2244 	trace_create_file("header_page", 0444, d_events,
2245 			  ring_buffer_print_page_header,
2246 			  &ftrace_show_header_fops);
2247 
2248 	trace_create_file("header_event", 0444, d_events,
2249 			  ring_buffer_print_entry_header,
2250 			  &ftrace_show_header_fops);
2251 
2252 	trace_create_file("enable", 0644, d_events,
2253 			  tr, &ftrace_tr_enable_fops);
2254 
2255 	tr->event_dir = d_events;
2256 
2257 	return 0;
2258 }
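/*
 * The resulting layout under @parent looks roughly like this
 * (per-event directories are added later by the callers below):
 *
 *   set_event
 *   events/
 *     header_page
 *     header_event
 *     enable
 */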
2259 
2260 /**
 * event_trace_add_tracer - add an instance of a trace_array to events
2262  * @parent: The parent dentry to place the files/directories for events in
2263  * @tr: The trace array associated with these events
2264  *
2265  * When a new instance is created, it needs to set up its events
2266  * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
2268  *
2269  * Returns 0 on success.
2270  */
2271 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2272 {
2273 	int ret;
2274 
2275 	mutex_lock(&event_mutex);
2276 
2277 	ret = create_event_toplevel_files(parent, tr);
2278 	if (ret)
2279 		goto out_unlock;
2280 
2281 	down_write(&trace_event_sem);
2282 	__trace_add_event_dirs(tr);
2283 	up_write(&trace_event_sem);
2284 
2285  out_unlock:
2286 	mutex_unlock(&event_mutex);
2287 
2288 	return ret;
2289 }
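/*
 * As a usage sketch: creating a tracing instance, e.g.
 *
 *   mkdir /sys/kernel/debug/tracing/instances/foo
 *
 * ends up here with the new instance's dentry, giving the instance its
 * own events/ hierarchy; deleting the instance directory tears it down
 * again via event_trace_del_tracer() below.
 */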
2290 
2291 /*
 * The top trace array has already had its file descriptors created.
2293  * Now the files themselves need to be created.
2294  */
2295 static __init int
2296 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2297 {
2298 	int ret;
2299 
2300 	mutex_lock(&event_mutex);
2301 
2302 	ret = create_event_toplevel_files(parent, tr);
2303 	if (ret)
2304 		goto out_unlock;
2305 
2306 	down_write(&trace_event_sem);
2307 	__trace_early_add_event_dirs(tr);
2308 	up_write(&trace_event_sem);
2309 
2310  out_unlock:
2311 	mutex_unlock(&event_mutex);
2312 
2313 	return ret;
2314 }
2315 
2316 int event_trace_del_tracer(struct trace_array *tr)
2317 {
2318 	mutex_lock(&event_mutex);
2319 
2320 	/* Disable any event triggers and associated soft-disabled events */
2321 	clear_event_triggers(tr);
2322 
2323 	/* Disable any running events */
2324 	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
2325 
	/* Accesses to events are within rcu_read_lock_sched() */
2327 	synchronize_sched();
2328 
2329 	down_write(&trace_event_sem);
2330 	__trace_remove_event_dirs(tr);
2331 	debugfs_remove_recursive(tr->event_dir);
2332 	up_write(&trace_event_sem);
2333 
2334 	tr->event_dir = NULL;
2335 
2336 	mutex_unlock(&event_mutex);
2337 
2338 	return 0;
2339 }
2340 
2341 static __init int event_trace_memsetup(void)
2342 {
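	/* SLAB_PANIC: failure to create these boot-time caches is fatal. */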
2343 	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2344 	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2345 	return 0;
2346 }
2347 
2348 static __init int event_trace_enable(void)
2349 {
2350 	struct trace_array *tr = top_trace_array();
2351 	struct ftrace_event_call **iter, *call;
2352 	char *buf = bootup_event_buf;
2353 	char *token;
2354 	int ret;
2355 
2356 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2357 
2358 		call = *iter;
2359 		ret = event_init(call);
2360 		if (!ret)
2361 			list_add(&call->list, &ftrace_events);
2362 	}
2363 
2364 	/*
2365 	 * We need the top trace array to have a working set of trace
2366 	 * points at early init, before the debug files and directories
2367 	 * are created. Create the file entries now, and attach them
2368 	 * to the actual file dentries later.
2369 	 */
2370 	__trace_early_add_events(tr);
2371 
2372 	while (true) {
2373 		token = strsep(&buf, ",");
2374 
2375 		if (!token)
2376 			break;
2377 		if (!*token)
2378 			continue;
2379 
2380 		ret = ftrace_set_clr_event(tr, token, 1);
2381 		if (ret)
2382 			pr_warn("Failed to enable trace event: %s\n", token);
2383 	}
2384 
2385 	trace_printk_start_comm();
2386 
2387 	register_event_cmds();
2388 
2389 	register_trigger_cmds();
2390 
2391 	return 0;
2392 }
2393 
2394 static __init int event_trace_init(void)
2395 {
2396 	struct trace_array *tr;
2397 	struct dentry *d_tracer;
2398 	struct dentry *entry;
2399 	int ret;
2400 
2401 	tr = top_trace_array();
2402 
2403 	d_tracer = tracing_init_dentry();
2404 	if (!d_tracer)
2405 		return 0;
2406 
2407 	entry = debugfs_create_file("available_events", 0444, d_tracer,
2408 				    tr, &ftrace_avail_fops);
2409 	if (!entry)
		pr_warning("Could not create debugfs 'available_events' entry\n");
2412 
2413 	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields\n");
2415 
2416 	ret = early_event_add_tracer(d_tracer, tr);
2417 	if (ret)
2418 		return ret;
2419 
2420 #ifdef CONFIG_MODULES
2421 	ret = register_module_notifier(&trace_module_nb);
2422 	if (ret)
2423 		pr_warning("Failed to register trace events module notifier\n");
2424 #endif
2425 	return 0;
2426 }
2427 early_initcall(event_trace_memsetup);
2428 core_initcall(event_trace_enable);
2429 fs_initcall(event_trace_init);
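/*
 * Initcall ordering matters here: early_initcall creates the slab
 * caches, core_initcall registers the compiled-in events (so they can
 * be enabled from the trace_event= command line), and fs_initcall
 * creates the debugfs control files once the filesystem is up.
 */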
2430 
2431 #ifdef CONFIG_FTRACE_STARTUP_TEST
2432 
2433 static DEFINE_SPINLOCK(test_spinlock);
2434 static DEFINE_SPINLOCK(test_spinlock_irq);
2435 static DEFINE_MUTEX(test_mutex);
2436 
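/*
 * Take and release a few different lock types and sleep briefly; the
 * point is simply to exercise tracepoints (irqs on/off, scheduling,
 * timers) while the events under test are enabled.
 */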
2437 static __init void test_work(struct work_struct *dummy)
2438 {
2439 	spin_lock(&test_spinlock);
2440 	spin_lock_irq(&test_spinlock_irq);
2441 	udelay(1);
2442 	spin_unlock_irq(&test_spinlock_irq);
2443 	spin_unlock(&test_spinlock);
2444 
2445 	mutex_lock(&test_mutex);
2446 	msleep(1);
2447 	mutex_unlock(&test_mutex);
2448 }
2449 
2450 static __init int event_test_thread(void *unused)
2451 {
2452 	void *test_malloc;
2453 
2454 	test_malloc = kmalloc(1234, GFP_KERNEL);
2455 	if (!test_malloc)
2456 		pr_info("failed to kmalloc\n");
2457 
2458 	schedule_on_each_cpu(test_work);
2459 
2460 	kfree(test_malloc);
2461 
	/*
	 * Sleep until kthread_stop() is called; the task state must be
	 * reset after every wakeup, since schedule() returns with the
	 * task back in TASK_RUNNING.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
2465 
2466 	return 0;
2467 }
2468 
2469 /*
2470  * Do various things that may trigger events.
2471  */
2472 static __init void event_test_stuff(void)
2473 {
2474 	struct task_struct *test_thread;
2475 
2476 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
2477 	msleep(1);
2478 	kthread_stop(test_thread);
2479 }
2480 
2481 /*
2482  * For every trace event defined, we will test each trace point separately,
2483  * and then by groups, and finally all trace points.
2484  */
2485 static __init void event_trace_self_tests(void)
2486 {
2487 	struct ftrace_subsystem_dir *dir;
2488 	struct ftrace_event_file *file;
2489 	struct ftrace_event_call *call;
2490 	struct event_subsystem *system;
2491 	struct trace_array *tr;
2492 	int ret;
2493 
2494 	tr = top_trace_array();
2495 
2496 	pr_info("Running tests on trace events:\n");
2497 
2498 	list_for_each_entry(file, &tr->events, list) {
2499 
2500 		call = file->event_call;
2501 
2502 		/* Only test those that have a probe */
2503 		if (!call->class || !call->class->probe)
2504 			continue;
2505 
2506 /*
 * Testing syscall events here is pretty useless, but we still
 * do it if configured, even though it is time consuming.
2509  * What we really need is a user thread to perform the
2510  * syscalls as we test.
2511  */
2512 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
2513 		if (call->class->system &&
2514 		    strcmp(call->class->system, "syscalls") == 0)
2515 			continue;
2516 #endif
2517 
2518 		pr_info("Testing event %s: ", call->name);
2519 
2520 		/*
2521 		 * If an event is already enabled, someone is using
2522 		 * it and the self test should not be on.
2523 		 */
2524 		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
2525 			pr_warning("Enabled event during self test!\n");
2526 			WARN_ON_ONCE(1);
2527 			continue;
2528 		}
2529 
2530 		ftrace_event_enable_disable(file, 1);
2531 		event_test_stuff();
2532 		ftrace_event_enable_disable(file, 0);
2533 
2534 		pr_cont("OK\n");
2535 	}
2536 
2537 	/* Now test at the sub system level */
2538 
2539 	pr_info("Running tests on trace event systems:\n");
2540 
2541 	list_for_each_entry(dir, &tr->systems, list) {
2542 
2543 		system = dir->subsystem;
2544 
2545 		/* the ftrace system is special, skip it */
2546 		if (strcmp(system->name, "ftrace") == 0)
2547 			continue;
2548 
2549 		pr_info("Testing event system %s: ", system->name);
2550 
2551 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
2552 		if (WARN_ON_ONCE(ret)) {
2553 			pr_warning("error enabling system %s\n",
2554 				   system->name);
2555 			continue;
2556 		}
2557 
2558 		event_test_stuff();
2559 
2560 		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
2561 		if (WARN_ON_ONCE(ret)) {
2562 			pr_warning("error disabling system %s\n",
2563 				   system->name);
2564 			continue;
2565 		}
2566 
2567 		pr_cont("OK\n");
2568 	}
2569 
2570 	/* Test with all events enabled */
2571 
2572 	pr_info("Running tests on all trace events:\n");
2573 	pr_info("Testing all events: ");
2574 
2575 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
2576 	if (WARN_ON_ONCE(ret)) {
2577 		pr_warning("error enabling all events\n");
2578 		return;
2579 	}
2580 
2581 	event_test_stuff();
2582 
	/* Disable all events again */
2584 	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
2585 	if (WARN_ON_ONCE(ret)) {
2586 		pr_warning("error disabling all events\n");
2587 		return;
2588 	}
2589 
2590 	pr_cont("OK\n");
2591 }
2592 
2593 #ifdef CONFIG_FUNCTION_TRACER
2594 
2595 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
2596 
2597 static void
2598 function_test_events_call(unsigned long ip, unsigned long parent_ip,
2599 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
2600 {
2601 	struct ring_buffer_event *event;
2602 	struct ring_buffer *buffer;
2603 	struct ftrace_entry *entry;
2604 	unsigned long flags;
2605 	long disabled;
2606 	int cpu;
2607 	int pc;
2608 
2609 	pc = preempt_count();
2610 	preempt_disable_notrace();
2611 	cpu = raw_smp_processor_id();
2612 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
2613 
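	/* A count other than 1 means this callback recursed on this CPU. */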
2614 	if (disabled != 1)
2615 		goto out;
2616 
2617 	local_save_flags(flags);
2618 
2619 	event = trace_current_buffer_lock_reserve(&buffer,
2620 						  TRACE_FN, sizeof(*entry),
2621 						  flags, pc);
2622 	if (!event)
2623 		goto out;
2624 	entry	= ring_buffer_event_data(event);
2625 	entry->ip			= ip;
2626 	entry->parent_ip		= parent_ip;
2627 
2628 	trace_buffer_unlock_commit(buffer, event, flags, pc);
2629 
2630  out:
2631 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
2632 	preempt_enable_notrace();
2633 }
2634 
static struct ftrace_ops trace_ops __initdata = {
2637 	.func = function_test_events_call,
2638 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
2639 };
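/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells the ftrace core that this
 * callback provides its own recursion protection (the per-cpu counter
 * above), so no extra guards are added around it.
 */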
2640 
2641 static __init void event_trace_self_test_with_function(void)
2642 {
2643 	int ret;
2644 	ret = register_ftrace_function(&trace_ops);
2645 	if (WARN_ON(ret < 0)) {
2646 		pr_info("Failed to enable function tracer for event tests\n");
2647 		return;
2648 	}
2649 	pr_info("Running tests again, along with the function tracer\n");
2650 	event_trace_self_tests();
2651 	unregister_ftrace_function(&trace_ops);
2652 }
2653 #else
2654 static __init void event_trace_self_test_with_function(void)
2655 {
2656 }
2657 #endif
2658 
2659 static __init int event_trace_self_tests_init(void)
2660 {
2661 	if (!tracing_selftest_disabled) {
2662 		event_trace_self_tests();
2663 		event_trace_self_test_with_function();
2664 	}
2665 
2666 	return 0;
2667 }
2668 
2669 late_initcall(event_trace_self_tests_init);
2670 
2671 #endif
2672