xref: /openbmc/linux/kernel/trace/trace_events.c (revision 0d456bad)
1 /*
2  * event tracer
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  *  - Added format output of the fields of the trace point.
7  *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
8  *
9  */
10 
11 #include <linux/workqueue.h>
12 #include <linux/spinlock.h>
13 #include <linux/kthread.h>
14 #include <linux/debugfs.h>
15 #include <linux/uaccess.h>
16 #include <linux/module.h>
17 #include <linux/ctype.h>
18 #include <linux/slab.h>
19 #include <linux/delay.h>
20 
21 #include <asm/setup.h>
22 
23 #include "trace_output.h"
24 
25 #undef TRACE_SYSTEM
26 #define TRACE_SYSTEM "TRACE_SYSTEM"
27 
28 DEFINE_MUTEX(event_mutex);
29 
30 DEFINE_MUTEX(event_storage_mutex);
31 EXPORT_SYMBOL_GPL(event_storage_mutex);
32 
33 char event_storage[EVENT_STORAGE_SIZE];
34 EXPORT_SYMBOL_GPL(event_storage);
35 
36 LIST_HEAD(ftrace_events);
37 LIST_HEAD(ftrace_common_fields);
38 
39 struct list_head *
40 trace_get_fields(struct ftrace_event_call *event_call)
41 {
42 	if (!event_call->class->get_fields)
43 		return &event_call->class->fields;
44 	return event_call->class->get_fields(event_call);
45 }
46 
47 static int __trace_define_field(struct list_head *head, const char *type,
48 				const char *name, int offset, int size,
49 				int is_signed, int filter_type)
50 {
51 	struct ftrace_event_field *field;
52 
53 	field = kzalloc(sizeof(*field), GFP_KERNEL);
54 	if (!field)
55 		goto err;
56 
57 	field->name = kstrdup(name, GFP_KERNEL);
58 	if (!field->name)
59 		goto err;
60 
61 	field->type = kstrdup(type, GFP_KERNEL);
62 	if (!field->type)
63 		goto err;
64 
65 	if (filter_type == FILTER_OTHER)
66 		field->filter_type = filter_assign_type(type);
67 	else
68 		field->filter_type = filter_type;
69 
70 	field->offset = offset;
71 	field->size = size;
72 	field->is_signed = is_signed;
73 
74 	list_add(&field->link, head);
75 
76 	return 0;
77 
78 err:
79 	if (field)
80 		kfree(field->name);
81 	kfree(field);
82 
83 	return -ENOMEM;
84 }
85 
86 int trace_define_field(struct ftrace_event_call *call, const char *type,
87 		       const char *name, int offset, int size, int is_signed,
88 		       int filter_type)
89 {
90 	struct list_head *head;
91 
92 	if (WARN_ON(!call->class))
93 		return 0;
94 
95 	head = trace_get_fields(call);
96 	return __trace_define_field(head, type, name, offset, size,
97 				    is_signed, filter_type);
98 }
99 EXPORT_SYMBOL_GPL(trace_define_field);
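
/*
 * Illustrative only: a class's define_fields() callback typically wraps
 * trace_define_field() once per field of its record.  The event and
 * structure names below are hypothetical and not part of this file.
 *
 *	static int foo_define_fields(struct ftrace_event_call *call)
 *	{
 *		struct foo_entry field;
 *
 *		return trace_define_field(call, "unsigned long", "ip",
 *					  offsetof(typeof(field), ip),
 *					  sizeof(field.ip),
 *					  is_signed_type(unsigned long),
 *					  FILTER_OTHER);
 *	}
 */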
100 
101 #define __common_field(type, item)					\
102 	ret = __trace_define_field(&ftrace_common_fields, #type,	\
103 				   "common_" #item,			\
104 				   offsetof(typeof(ent), item),		\
105 				   sizeof(ent.item),			\
106 				   is_signed_type(type), FILTER_OTHER);	\
107 	if (ret)							\
108 		return ret;
109 
110 static int trace_define_common_fields(void)
111 {
112 	int ret;
113 	struct trace_entry ent;
114 
115 	__common_field(unsigned short, type);
116 	__common_field(unsigned char, flags);
117 	__common_field(unsigned char, preempt_count);
118 	__common_field(int, pid);
119 	__common_field(int, padding);
120 
121 	return ret;
122 }
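
/*
 * These common fields must mirror struct trace_entry (defined in the
 * tracing headers), which in this revision is expected to look roughly
 * like the sketch below; every event record starts with it, and the
 * fields appear as common_* in each event's format file.
 *
 *	struct trace_entry {
 *		unsigned short		type;
 *		unsigned char		flags;
 *		unsigned char		preempt_count;
 *		int			pid;
 *		int			padding;
 *	};
 */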
123 
124 void trace_destroy_fields(struct ftrace_event_call *call)
125 {
126 	struct ftrace_event_field *field, *next;
127 	struct list_head *head;
128 
129 	head = trace_get_fields(call);
130 	list_for_each_entry_safe(field, next, head, link) {
131 		list_del(&field->link);
132 		kfree(field->type);
133 		kfree(field->name);
134 		kfree(field);
135 	}
136 }
137 
138 int trace_event_raw_init(struct ftrace_event_call *call)
139 {
140 	int id;
141 
142 	id = register_ftrace_event(&call->event);
143 	if (!id)
144 		return -ENODEV;
145 
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(trace_event_raw_init);
149 
150 int ftrace_event_reg(struct ftrace_event_call *call,
151 		     enum trace_reg type, void *data)
152 {
153 	switch (type) {
154 	case TRACE_REG_REGISTER:
155 		return tracepoint_probe_register(call->name,
156 						 call->class->probe,
157 						 call);
158 	case TRACE_REG_UNREGISTER:
159 		tracepoint_probe_unregister(call->name,
160 					    call->class->probe,
161 					    call);
162 		return 0;
163 
164 #ifdef CONFIG_PERF_EVENTS
165 	case TRACE_REG_PERF_REGISTER:
166 		return tracepoint_probe_register(call->name,
167 						 call->class->perf_probe,
168 						 call);
169 	case TRACE_REG_PERF_UNREGISTER:
170 		tracepoint_probe_unregister(call->name,
171 					    call->class->perf_probe,
172 					    call);
173 		return 0;
174 	case TRACE_REG_PERF_OPEN:
175 	case TRACE_REG_PERF_CLOSE:
176 	case TRACE_REG_PERF_ADD:
177 	case TRACE_REG_PERF_DEL:
178 		return 0;
179 #endif
180 	}
181 	return 0;
182 }
183 EXPORT_SYMBOL_GPL(ftrace_event_reg);
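
/*
 * ftrace_event_reg() is the stock ->reg() implementation for classes whose
 * probes are ordinary tracepoint callbacks.  A generated event class is
 * typically wired up along these lines (a sketch only; the probe names
 * below are hypothetical, the real ones come from the TRACE_EVENT()
 * machinery):
 *
 *	static struct ftrace_event_class foo_class = {
 *		.system		= "foo",
 *		.reg		= ftrace_event_reg,
 *		.probe		= ftrace_raw_event_foo,
 *		.perf_probe	= perf_trace_foo,
 *	};
 */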
184 
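/*
 * Toggle saving of task comm names for every currently enabled event, so
 * that PIDs in the trace output can be resolved back to process names.
 */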
185 void trace_event_enable_cmd_record(bool enable)
186 {
187 	struct ftrace_event_call *call;
188 
189 	mutex_lock(&event_mutex);
190 	list_for_each_entry(call, &ftrace_events, list) {
191 		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
192 			continue;
193 
194 		if (enable) {
195 			tracing_start_cmdline_record();
196 			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
197 		} else {
198 			tracing_stop_cmdline_record();
199 			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
200 		}
201 	}
202 	mutex_unlock(&event_mutex);
203 }
204 
205 static int ftrace_event_enable_disable(struct ftrace_event_call *call,
206 					int enable)
207 {
208 	int ret = 0;
209 
210 	switch (enable) {
211 	case 0:
212 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
213 			call->flags &= ~TRACE_EVENT_FL_ENABLED;
214 			if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
215 				tracing_stop_cmdline_record();
216 				call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
217 			}
218 			call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
219 		}
220 		break;
221 	case 1:
222 		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
223 			if (trace_flags & TRACE_ITER_RECORD_CMD) {
224 				tracing_start_cmdline_record();
225 				call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
226 			}
227 			ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
228 			if (ret) {
229 				tracing_stop_cmdline_record();
230 				pr_info("event trace: Could not enable event "
231 					"%s\n", call->name);
232 				break;
233 			}
234 			call->flags |= TRACE_EVENT_FL_ENABLED;
235 		}
236 		break;
237 	}
238 
239 	return ret;
240 }
241 
242 static void ftrace_clear_events(void)
243 {
244 	struct ftrace_event_call *call;
245 
246 	mutex_lock(&event_mutex);
247 	list_for_each_entry(call, &ftrace_events, list) {
248 		ftrace_event_enable_disable(call, 0);
249 	}
250 	mutex_unlock(&event_mutex);
251 }
252 
253 static void __put_system(struct event_subsystem *system)
254 {
255 	struct event_filter *filter = system->filter;
256 
257 	WARN_ON_ONCE(system->ref_count == 0);
258 	if (--system->ref_count)
259 		return;
260 
261 	if (filter) {
262 		kfree(filter->filter_string);
263 		kfree(filter);
264 	}
265 	kfree(system->name);
266 	kfree(system);
267 }
268 
269 static void __get_system(struct event_subsystem *system)
270 {
271 	WARN_ON_ONCE(system->ref_count == 0);
272 	system->ref_count++;
273 }
274 
275 static void put_system(struct event_subsystem *system)
276 {
277 	mutex_lock(&event_mutex);
278 	__put_system(system);
279 	mutex_unlock(&event_mutex);
280 }
281 
282 /*
283  * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
284  */
285 static int __ftrace_set_clr_event(const char *match, const char *sub,
286 				  const char *event, int set)
287 {
288 	struct ftrace_event_call *call;
289 	int ret = -EINVAL;
290 
291 	mutex_lock(&event_mutex);
292 	list_for_each_entry(call, &ftrace_events, list) {
293 
294 		if (!call->name || !call->class || !call->class->reg)
295 			continue;
296 
297 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
298 			continue;
299 
300 		if (match &&
301 		    strcmp(match, call->name) != 0 &&
302 		    strcmp(match, call->class->system) != 0)
303 			continue;
304 
305 		if (sub && strcmp(sub, call->class->system) != 0)
306 			continue;
307 
308 		if (event && strcmp(event, call->name) != 0)
309 			continue;
310 
311 		ftrace_event_enable_disable(call, set);
312 
313 		ret = 0;
314 	}
315 	mutex_unlock(&event_mutex);
316 
317 	return ret;
318 }
319 
320 static int ftrace_set_clr_event(char *buf, int set)
321 {
322 	char *event = NULL, *sub = NULL, *match;
323 
324 	/*
325 	 * The buf format can be <subsystem>:<event-name>
326 	 *  *:<event-name> means any event by that name.
327 	 *  :<event-name> is the same.
328 	 *
329 	 *  <subsystem>:* means all events in that subsystem
330 	 *  <subsystem>: means the same.
331 	 *
332 	 *  <name> (no ':') means all events in a subsystem with
333 	 *  the name <name> or any event that matches <name>
334 	 */
335 
336 	match = strsep(&buf, ":");
337 	if (buf) {
338 		sub = match;
339 		event = buf;
340 		match = NULL;
341 
342 		if (!strlen(sub) || strcmp(sub, "*") == 0)
343 			sub = NULL;
344 		if (!strlen(event) || strcmp(event, "*") == 0)
345 			event = NULL;
346 	}
347 
348 	return __ftrace_set_clr_event(match, sub, event, set);
349 }
350 
351 /**
352  * trace_set_clr_event - enable or disable an event
353  * @system: system name to match (NULL for any system)
354  * @event: event name to match (NULL for all events, within system)
355  * @set: 1 to enable, 0 to disable
356  *
357  * This is a way for other parts of the kernel to enable or disable
358  * event recording.
359  *
360  * Returns 0 on success, -EINVAL if the parameters do not match any
361  * registered events.
362  */
363 int trace_set_clr_event(const char *system, const char *event, int set)
364 {
365 	return __ftrace_set_clr_event(NULL, system, event, set);
366 }
367 EXPORT_SYMBOL_GPL(trace_set_clr_event);
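
/*
 * Minimal in-kernel usage sketch (illustrative only; "sched" and
 * "sched_switch" are just example names):
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	ret = trace_set_clr_event("sched", NULL, 0);
 */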
368 
369 /* 128 should be much more than enough */
370 #define EVENT_BUF_SIZE		127
371 
372 static ssize_t
373 ftrace_event_write(struct file *file, const char __user *ubuf,
374 		   size_t cnt, loff_t *ppos)
375 {
376 	struct trace_parser parser;
377 	ssize_t read, ret;
378 
379 	if (!cnt)
380 		return 0;
381 
382 	ret = tracing_update_buffers();
383 	if (ret < 0)
384 		return ret;
385 
386 	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
387 		return -ENOMEM;
388 
389 	read = trace_get_user(&parser, ubuf, cnt, ppos);
390 
391 	if (read >= 0 && trace_parser_loaded((&parser))) {
392 		int set = 1;
393 
394 		if (*parser.buffer == '!')
395 			set = 0;
396 
397 		parser.buffer[parser.idx] = 0;
398 
399 		ret = ftrace_set_clr_event(parser.buffer + !set, set);
400 		if (ret)
401 			goto out_put;
402 	}
403 
404 	ret = read;
405 
406  out_put:
407 	trace_parser_put(&parser);
408 
409 	return ret;
410 }
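
/*
 * This implements writes to the "set_event" file (typically found under
 * /sys/kernel/debug/tracing).  Example input, following the format
 * documented in ftrace_set_clr_event():
 *
 *	# echo 'sched:sched_switch' > set_event
 *	# echo 'irq:*' >> set_event
 *	# echo '!sched:sched_switch' >> set_event	(a leading '!' disables)
 *	# echo > set_event				(truncate clears everything)
 */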
411 
412 static void *
413 t_next(struct seq_file *m, void *v, loff_t *pos)
414 {
415 	struct ftrace_event_call *call = v;
416 
417 	(*pos)++;
418 
419 	list_for_each_entry_continue(call, &ftrace_events, list) {
420 		/*
421 		 * The ftrace subsystem is for showing formats only.
422 		 * Its events cannot be enabled or disabled via the event files.
423 		 */
424 		if (call->class && call->class->reg)
425 			return call;
426 	}
427 
428 	return NULL;
429 }
430 
431 static void *t_start(struct seq_file *m, loff_t *pos)
432 {
433 	struct ftrace_event_call *call;
434 	loff_t l;
435 
436 	mutex_lock(&event_mutex);
437 
438 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
439 	for (l = 0; l <= *pos; ) {
440 		call = t_next(m, call, &l);
441 		if (!call)
442 			break;
443 	}
444 	return call;
445 }
446 
447 static void *
448 s_next(struct seq_file *m, void *v, loff_t *pos)
449 {
450 	struct ftrace_event_call *call = v;
451 
452 	(*pos)++;
453 
454 	list_for_each_entry_continue(call, &ftrace_events, list) {
455 		if (call->flags & TRACE_EVENT_FL_ENABLED)
456 			return call;
457 	}
458 
459 	return NULL;
460 }
461 
462 static void *s_start(struct seq_file *m, loff_t *pos)
463 {
464 	struct ftrace_event_call *call;
465 	loff_t l;
466 
467 	mutex_lock(&event_mutex);
468 
469 	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
470 	for (l = 0; l <= *pos; ) {
471 		call = s_next(m, call, &l);
472 		if (!call)
473 			break;
474 	}
475 	return call;
476 }
477 
478 static int t_show(struct seq_file *m, void *v)
479 {
480 	struct ftrace_event_call *call = v;
481 
482 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
483 		seq_printf(m, "%s:", call->class->system);
484 	seq_printf(m, "%s\n", call->name);
485 
486 	return 0;
487 }
488 
489 static void t_stop(struct seq_file *m, void *p)
490 {
491 	mutex_unlock(&event_mutex);
492 }
493 
494 static ssize_t
495 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
496 		  loff_t *ppos)
497 {
498 	struct ftrace_event_call *call = filp->private_data;
499 	char *buf;
500 
501 	if (call->flags & TRACE_EVENT_FL_ENABLED)
502 		buf = "1\n";
503 	else
504 		buf = "0\n";
505 
506 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
507 }
508 
509 static ssize_t
510 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
511 		   loff_t *ppos)
512 {
513 	struct ftrace_event_call *call = filp->private_data;
514 	unsigned long val;
515 	int ret;
516 
517 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
518 	if (ret)
519 		return ret;
520 
521 	ret = tracing_update_buffers();
522 	if (ret < 0)
523 		return ret;
524 
525 	switch (val) {
526 	case 0:
527 	case 1:
528 		mutex_lock(&event_mutex);
529 		ret = ftrace_event_enable_disable(call, val);
530 		mutex_unlock(&event_mutex);
531 		break;
532 
533 	default:
534 		return -EINVAL;
535 	}
536 
537 	*ppos += cnt;
538 
539 	return ret ? ret : cnt;
540 }
541 
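/*
 * Report the aggregate state of a subsystem's events: '0' if every
 * matching event is disabled, '1' if every matching event is enabled,
 * 'X' if it is a mixture, and '?' if no events matched at all.
 */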
542 static ssize_t
543 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
544 		   loff_t *ppos)
545 {
546 	const char set_to_char[4] = { '?', '0', '1', 'X' };
547 	struct event_subsystem *system = filp->private_data;
548 	struct ftrace_event_call *call;
549 	char buf[2];
550 	int set = 0;
551 	int ret;
552 
553 	mutex_lock(&event_mutex);
554 	list_for_each_entry(call, &ftrace_events, list) {
555 		if (!call->name || !call->class || !call->class->reg)
556 			continue;
557 
558 		if (system && strcmp(call->class->system, system->name) != 0)
559 			continue;
560 
561 		/*
562 		 * We need to find out if all the events are set
563 		 * or if all events are cleared, or if we have
564 		 * a mixture.
565 		 */
566 		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
567 
568 		/*
569 		 * If we have a mixture, no need to look further.
570 		 */
571 		if (set == 3)
572 			break;
573 	}
574 	mutex_unlock(&event_mutex);
575 
576 	buf[0] = set_to_char[set];
577 	buf[1] = '\n';
578 
579 	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
580 
581 	return ret;
582 }
583 
584 static ssize_t
585 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
586 		    loff_t *ppos)
587 {
588 	struct event_subsystem *system = filp->private_data;
589 	const char *name = NULL;
590 	unsigned long val;
591 	ssize_t ret;
592 
593 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
594 	if (ret)
595 		return ret;
596 
597 	ret = tracing_update_buffers();
598 	if (ret < 0)
599 		return ret;
600 
601 	if (val != 0 && val != 1)
602 		return -EINVAL;
603 
604 	/*
605 	 * Opening of "enable" adds a ref count to system,
606 	 * so the name is safe to use.
607 	 */
608 	if (system)
609 		name = system->name;
610 
611 	ret = __ftrace_set_clr_event(NULL, name, NULL, val);
612 	if (ret)
613 		goto out;
614 
615 	ret = cnt;
616 
617 out:
618 	*ppos += cnt;
619 
620 	return ret;
621 }
622 
623 enum {
624 	FORMAT_HEADER		= 1,
625 	FORMAT_FIELD_SEPERATOR	= 2,
626 	FORMAT_PRINTFMT		= 3,
627 };
628 
629 static void *f_next(struct seq_file *m, void *v, loff_t *pos)
630 {
631 	struct ftrace_event_call *call = m->private;
632 	struct ftrace_event_field *field;
633 	struct list_head *common_head = &ftrace_common_fields;
634 	struct list_head *head = trace_get_fields(call);
635 
636 	(*pos)++;
637 
638 	switch ((unsigned long)v) {
639 	case FORMAT_HEADER:
640 		if (unlikely(list_empty(common_head)))
641 			return NULL;
642 
643 		field = list_entry(common_head->prev,
644 				   struct ftrace_event_field, link);
645 		return field;
646 
647 	case FORMAT_FIELD_SEPERATOR:
648 		if (unlikely(list_empty(head)))
649 			return NULL;
650 
651 		field = list_entry(head->prev, struct ftrace_event_field, link);
652 		return field;
653 
654 	case FORMAT_PRINTFMT:
655 		/* all done */
656 		return NULL;
657 	}
658 
659 	field = v;
660 	if (field->link.prev == common_head)
661 		return (void *)FORMAT_FIELD_SEPERATOR;
662 	else if (field->link.prev == head)
663 		return (void *)FORMAT_PRINTFMT;
664 
665 	field = list_entry(field->link.prev, struct ftrace_event_field, link);
666 
667 	return field;
668 }
669 
670 static void *f_start(struct seq_file *m, loff_t *pos)
671 {
672 	loff_t l = 0;
673 	void *p;
674 
675 	/* Start by showing the header */
676 	if (!*pos)
677 		return (void *)FORMAT_HEADER;
678 
679 	p = (void *)FORMAT_HEADER;
680 	do {
681 		p = f_next(m, p, &l);
682 	} while (p && l < *pos);
683 
684 	return p;
685 }
686 
687 static int f_show(struct seq_file *m, void *v)
688 {
689 	struct ftrace_event_call *call = m->private;
690 	struct ftrace_event_field *field;
691 	const char *array_descriptor;
692 
693 	switch ((unsigned long)v) {
694 	case FORMAT_HEADER:
695 		seq_printf(m, "name: %s\n", call->name);
696 		seq_printf(m, "ID: %d\n", call->event.type);
697 		seq_printf(m, "format:\n");
698 		return 0;
699 
700 	case FORMAT_FIELD_SEPERATOR:
701 		seq_putc(m, '\n');
702 		return 0;
703 
704 	case FORMAT_PRINTFMT:
705 		seq_printf(m, "\nprint fmt: %s\n",
706 			   call->print_fmt);
707 		return 0;
708 	}
709 
710 	field = v;
711 
712 	/*
713 	 * Smartly shows the array type (except dynamic arrays).
714 	 * Normal:
715 	 *	field:TYPE VAR
716 	 * If TYPE := TYPE[LEN], it is shown:
717 	 *	field:TYPE VAR[LEN]
718 	 */
719 	array_descriptor = strchr(field->type, '[');
720 
721 	if (!strncmp(field->type, "__data_loc", 10))
722 		array_descriptor = NULL;
723 
724 	if (!array_descriptor)
725 		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
726 			   field->type, field->name, field->offset,
727 			   field->size, !!field->is_signed);
728 	else
729 		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
730 			   (int)(array_descriptor - field->type),
731 			   field->type, field->name,
732 			   array_descriptor, field->offset,
733 			   field->size, !!field->is_signed);
734 
735 	return 0;
736 }
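
/*
 * For reference, the resulting "format" file reads roughly like the
 * abridged, illustrative sample below (the name, ID, fields and print
 * format depend entirely on the event being shown):
 *
 *	name: sched_switch
 *	ID: 51
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *		field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
 *		... remaining common_* fields ...
 *
 *		... event-specific fields ...
 *
 *	print fmt: "..."
 */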
737 
738 static void f_stop(struct seq_file *m, void *p)
739 {
740 }
741 
742 static const struct seq_operations trace_format_seq_ops = {
743 	.start		= f_start,
744 	.next		= f_next,
745 	.stop		= f_stop,
746 	.show		= f_show,
747 };
748 
749 static int trace_format_open(struct inode *inode, struct file *file)
750 {
751 	struct ftrace_event_call *call = inode->i_private;
752 	struct seq_file *m;
753 	int ret;
754 
755 	ret = seq_open(file, &trace_format_seq_ops);
756 	if (ret < 0)
757 		return ret;
758 
759 	m = file->private_data;
760 	m->private = call;
761 
762 	return 0;
763 }
764 
765 static ssize_t
766 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
767 {
768 	struct ftrace_event_call *call = filp->private_data;
769 	struct trace_seq *s;
770 	int r;
771 
772 	if (*ppos)
773 		return 0;
774 
775 	s = kmalloc(sizeof(*s), GFP_KERNEL);
776 	if (!s)
777 		return -ENOMEM;
778 
779 	trace_seq_init(s);
780 	trace_seq_printf(s, "%d\n", call->event.type);
781 
782 	r = simple_read_from_buffer(ubuf, cnt, ppos,
783 				    s->buffer, s->len);
784 	kfree(s);
785 	return r;
786 }
787 
788 static ssize_t
789 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
790 		  loff_t *ppos)
791 {
792 	struct ftrace_event_call *call = filp->private_data;
793 	struct trace_seq *s;
794 	int r;
795 
796 	if (*ppos)
797 		return 0;
798 
799 	s = kmalloc(sizeof(*s), GFP_KERNEL);
800 	if (!s)
801 		return -ENOMEM;
802 
803 	trace_seq_init(s);
804 
805 	print_event_filter(call, s);
806 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
807 
808 	kfree(s);
809 
810 	return r;
811 }
812 
813 static ssize_t
814 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
815 		   loff_t *ppos)
816 {
817 	struct ftrace_event_call *call = filp->private_data;
818 	char *buf;
819 	int err;
820 
821 	if (cnt >= PAGE_SIZE)
822 		return -EINVAL;
823 
824 	buf = (char *)__get_free_page(GFP_TEMPORARY);
825 	if (!buf)
826 		return -ENOMEM;
827 
828 	if (copy_from_user(buf, ubuf, cnt)) {
829 		free_page((unsigned long) buf);
830 		return -EFAULT;
831 	}
832 	buf[cnt] = '\0';
833 
834 	err = apply_event_filter(call, buf);
835 	free_page((unsigned long) buf);
836 	if (err < 0)
837 		return err;
838 
839 	*ppos += cnt;
840 
841 	return cnt;
842 }
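
/*
 * Typical usage of a per-event "filter" file (illustrative; the event and
 * field names are only examples):
 *
 *	# echo 'common_pid == 1' > events/sched/sched_switch/filter
 *	# echo 0 > events/sched/sched_switch/filter	(clear the filter)
 */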
843 
844 static LIST_HEAD(event_subsystems);
845 
846 static int subsystem_open(struct inode *inode, struct file *filp)
847 {
848 	struct event_subsystem *system = NULL;
849 	int ret;
850 
851 	if (!inode->i_private)
852 		goto skip_search;
853 
854 	/* Make sure the system still exists */
855 	mutex_lock(&event_mutex);
856 	list_for_each_entry(system, &event_subsystems, list) {
857 		if (system == inode->i_private) {
858 			/* Don't open systems with no events */
859 			if (!system->nr_events) {
860 				system = NULL;
861 				break;
862 			}
863 			__get_system(system);
864 			break;
865 		}
866 	}
867 	mutex_unlock(&event_mutex);
868 
869 	if (system != inode->i_private)
870 		return -ENODEV;
871 
872  skip_search:
873 	ret = tracing_open_generic(inode, filp);
874 	if (ret < 0 && system)
875 		put_system(system);
876 
877 	return ret;
878 }
879 
880 static int subsystem_release(struct inode *inode, struct file *file)
881 {
882 	struct event_subsystem *system = inode->i_private;
883 
884 	if (system)
885 		put_system(system);
886 
887 	return 0;
888 }
889 
890 static ssize_t
891 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
892 		      loff_t *ppos)
893 {
894 	struct event_subsystem *system = filp->private_data;
895 	struct trace_seq *s;
896 	int r;
897 
898 	if (*ppos)
899 		return 0;
900 
901 	s = kmalloc(sizeof(*s), GFP_KERNEL);
902 	if (!s)
903 		return -ENOMEM;
904 
905 	trace_seq_init(s);
906 
907 	print_subsystem_event_filter(system, s);
908 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
909 
910 	kfree(s);
911 
912 	return r;
913 }
914 
915 static ssize_t
916 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
917 		       loff_t *ppos)
918 {
919 	struct event_subsystem *system = filp->private_data;
920 	char *buf;
921 	int err;
922 
923 	if (cnt >= PAGE_SIZE)
924 		return -EINVAL;
925 
926 	buf = (char *)__get_free_page(GFP_TEMPORARY);
927 	if (!buf)
928 		return -ENOMEM;
929 
930 	if (copy_from_user(buf, ubuf, cnt)) {
931 		free_page((unsigned long) buf);
932 		return -EFAULT;
933 	}
934 	buf[cnt] = '\0';
935 
936 	err = apply_subsystem_event_filter(system, buf);
937 	free_page((unsigned long) buf);
938 	if (err < 0)
939 		return err;
940 
941 	*ppos += cnt;
942 
943 	return cnt;
944 }
945 
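/*
 * Dump one of the ring buffer header descriptions; the file's private_data
 * points at either ring_buffer_print_page_header() or
 * ring_buffer_print_entry_header() (see event_trace_init() below), and
 * userspace parsers use the output to decode the raw buffer layout.
 */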
946 static ssize_t
947 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
948 {
949 	int (*func)(struct trace_seq *s) = filp->private_data;
950 	struct trace_seq *s;
951 	int r;
952 
953 	if (*ppos)
954 		return 0;
955 
956 	s = kmalloc(sizeof(*s), GFP_KERNEL);
957 	if (!s)
958 		return -ENOMEM;
959 
960 	trace_seq_init(s);
961 
962 	func(s);
963 	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
964 
965 	kfree(s);
966 
967 	return r;
968 }
969 
970 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
971 static int ftrace_event_set_open(struct inode *inode, struct file *file);
972 
973 static const struct seq_operations show_event_seq_ops = {
974 	.start = t_start,
975 	.next = t_next,
976 	.show = t_show,
977 	.stop = t_stop,
978 };
979 
980 static const struct seq_operations show_set_event_seq_ops = {
981 	.start = s_start,
982 	.next = s_next,
983 	.show = t_show,
984 	.stop = t_stop,
985 };
986 
987 static const struct file_operations ftrace_avail_fops = {
988 	.open = ftrace_event_avail_open,
989 	.read = seq_read,
990 	.llseek = seq_lseek,
991 	.release = seq_release,
992 };
993 
994 static const struct file_operations ftrace_set_event_fops = {
995 	.open = ftrace_event_set_open,
996 	.read = seq_read,
997 	.write = ftrace_event_write,
998 	.llseek = seq_lseek,
999 	.release = seq_release,
1000 };
1001 
1002 static const struct file_operations ftrace_enable_fops = {
1003 	.open = tracing_open_generic,
1004 	.read = event_enable_read,
1005 	.write = event_enable_write,
1006 	.llseek = default_llseek,
1007 };
1008 
1009 static const struct file_operations ftrace_event_format_fops = {
1010 	.open = trace_format_open,
1011 	.read = seq_read,
1012 	.llseek = seq_lseek,
1013 	.release = seq_release,
1014 };
1015 
1016 static const struct file_operations ftrace_event_id_fops = {
1017 	.open = tracing_open_generic,
1018 	.read = event_id_read,
1019 	.llseek = default_llseek,
1020 };
1021 
1022 static const struct file_operations ftrace_event_filter_fops = {
1023 	.open = tracing_open_generic,
1024 	.read = event_filter_read,
1025 	.write = event_filter_write,
1026 	.llseek = default_llseek,
1027 };
1028 
1029 static const struct file_operations ftrace_subsystem_filter_fops = {
1030 	.open = subsystem_open,
1031 	.read = subsystem_filter_read,
1032 	.write = subsystem_filter_write,
1033 	.llseek = default_llseek,
1034 	.release = subsystem_release,
1035 };
1036 
1037 static const struct file_operations ftrace_system_enable_fops = {
1038 	.open = subsystem_open,
1039 	.read = system_enable_read,
1040 	.write = system_enable_write,
1041 	.llseek = default_llseek,
1042 	.release = subsystem_release,
1043 };
1044 
1045 static const struct file_operations ftrace_show_header_fops = {
1046 	.open = tracing_open_generic,
1047 	.read = show_header,
1048 	.llseek = default_llseek,
1049 };
1050 
1051 static struct dentry *event_trace_events_dir(void)
1052 {
1053 	static struct dentry *d_tracer;
1054 	static struct dentry *d_events;
1055 
1056 	if (d_events)
1057 		return d_events;
1058 
1059 	d_tracer = tracing_init_dentry();
1060 	if (!d_tracer)
1061 		return NULL;
1062 
1063 	d_events = debugfs_create_dir("events", d_tracer);
1064 	if (!d_events)
1065 		pr_warning("Could not create debugfs "
1066 			   "'events' directory\n");
1067 
1068 	return d_events;
1069 }
1070 
1071 static int
1072 ftrace_event_avail_open(struct inode *inode, struct file *file)
1073 {
1074 	const struct seq_operations *seq_ops = &show_event_seq_ops;
1075 
1076 	return seq_open(file, seq_ops);
1077 }
1078 
1079 static int
1080 ftrace_event_set_open(struct inode *inode, struct file *file)
1081 {
1082 	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1083 
1084 	if ((file->f_mode & FMODE_WRITE) &&
1085 	    (file->f_flags & O_TRUNC))
1086 		ftrace_clear_events();
1087 
1088 	return seq_open(file, seq_ops);
1089 }
1090 
1091 static struct dentry *
1092 event_subsystem_dir(const char *name, struct dentry *d_events)
1093 {
1094 	struct event_subsystem *system;
1095 	struct dentry *entry;
1096 
1097 	/* First see if we already created this dir */
1098 	list_for_each_entry(system, &event_subsystems, list) {
1099 		if (strcmp(system->name, name) == 0) {
1100 			system->nr_events++;
1101 			return system->entry;
1102 		}
1103 	}
1104 
1105 	/* need to create new entry */
1106 	system = kmalloc(sizeof(*system), GFP_KERNEL);
1107 	if (!system) {
1108 		pr_warning("No memory to create event subsystem %s\n",
1109 			   name);
1110 		return d_events;
1111 	}
1112 
1113 	system->entry = debugfs_create_dir(name, d_events);
1114 	if (!system->entry) {
1115 		pr_warning("Could not create event subsystem %s\n",
1116 			   name);
1117 		kfree(system);
1118 		return d_events;
1119 	}
1120 
1121 	system->nr_events = 1;
1122 	system->ref_count = 1;
1123 	system->name = kstrdup(name, GFP_KERNEL);
1124 	if (!system->name) {
1125 		debugfs_remove(system->entry);
1126 		kfree(system);
1127 		return d_events;
1128 	}
1129 
1130 	list_add(&system->list, &event_subsystems);
1131 
1132 	system->filter = NULL;
1133 
1134 	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1135 	if (!system->filter) {
1136 		pr_warning("Could not allocate filter for subsystem "
1137 			   "'%s'\n", name);
1138 		return system->entry;
1139 	}
1140 
1141 	entry = debugfs_create_file("filter", 0644, system->entry, system,
1142 				    &ftrace_subsystem_filter_fops);
1143 	if (!entry) {
1144 		kfree(system->filter);
1145 		system->filter = NULL;
1146 		pr_warning("Could not create debugfs "
1147 			   "'%s/filter' entry\n", name);
1148 	}
1149 
1150 	trace_create_file("enable", 0644, system->entry, system,
1151 			  &ftrace_system_enable_fops);
1152 
1153 	return system->entry;
1154 }
1155 
1156 static int
1157 event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
1158 		 const struct file_operations *id,
1159 		 const struct file_operations *enable,
1160 		 const struct file_operations *filter,
1161 		 const struct file_operations *format)
1162 {
1163 	struct list_head *head;
1164 	int ret;
1165 
1166 	/*
1167 	 * If the trace point header did not define TRACE_SYSTEM
1168 	 * then the system would be called "TRACE_SYSTEM".
1169 	 */
1170 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
1171 		d_events = event_subsystem_dir(call->class->system, d_events);
1172 
1173 	call->dir = debugfs_create_dir(call->name, d_events);
1174 	if (!call->dir) {
1175 		pr_warning("Could not create debugfs "
1176 			   "'%s' directory\n", call->name);
1177 		return -1;
1178 	}
1179 
1180 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1181 		trace_create_file("enable", 0644, call->dir, call,
1182 				  enable);
1183 
1184 #ifdef CONFIG_PERF_EVENTS
1185 	if (call->event.type && call->class->reg)
1186 		trace_create_file("id", 0444, call->dir, call,
1187 		 		  id);
1188 #endif
1189 
1190 	/*
1191 	 * Other events may have the same class. Only update
1192 	 * the fields if they are not already defined.
1193 	 */
1194 	head = trace_get_fields(call);
1195 	if (list_empty(head)) {
1196 		ret = call->class->define_fields(call);
1197 		if (ret < 0) {
1198 			pr_warning("Could not initialize trace point"
1199 				   " events/%s\n", call->name);
1200 			return ret;
1201 		}
1202 	}
1203 	trace_create_file("filter", 0644, call->dir, call,
1204 			  filter);
1205 
1206 	trace_create_file("format", 0444, call->dir, call,
1207 			  format);
1208 
1209 	return 0;
1210 }
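
/*
 * The result, for an event outside of TRACE_SYSTEM, is a per-event
 * directory under the tracing debugfs instance (typically
 * /sys/kernel/debug/tracing):
 *
 *	events/<system>/<event>/enable
 *	events/<system>/<event>/id	(only with CONFIG_PERF_EVENTS)
 *	events/<system>/<event>/filter
 *	events/<system>/<event>/format
 */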
1211 
1212 static void event_remove(struct ftrace_event_call *call)
1213 {
1214 	ftrace_event_enable_disable(call, 0);
1215 	if (call->event.funcs)
1216 		__unregister_ftrace_event(&call->event);
1217 	list_del(&call->list);
1218 }
1219 
1220 static int event_init(struct ftrace_event_call *call)
1221 {
1222 	int ret = 0;
1223 
1224 	if (WARN_ON(!call->name))
1225 		return -EINVAL;
1226 
1227 	if (call->class->raw_init) {
1228 		ret = call->class->raw_init(call);
1229 		if (ret < 0 && ret != -ENOSYS)
1230 			pr_warn("Could not initialize trace events/%s\n",
1231 				call->name);
1232 	}
1233 
1234 	return ret;
1235 }
1236 
1237 static int
1238 __trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
1239 		       const struct file_operations *id,
1240 		       const struct file_operations *enable,
1241 		       const struct file_operations *filter,
1242 		       const struct file_operations *format)
1243 {
1244 	struct dentry *d_events;
1245 	int ret;
1246 
1247 	ret = event_init(call);
1248 	if (ret < 0)
1249 		return ret;
1250 
1251 	d_events = event_trace_events_dir();
1252 	if (!d_events)
1253 		return -ENOENT;
1254 
1255 	ret = event_create_dir(call, d_events, id, enable, filter, format);
1256 	if (!ret)
1257 		list_add(&call->list, &ftrace_events);
1258 	call->mod = mod;
1259 
1260 	return ret;
1261 }
1262 
1263 /* Add an additional event_call dynamically */
1264 int trace_add_event_call(struct ftrace_event_call *call)
1265 {
1266 	int ret;
1267 	mutex_lock(&event_mutex);
1268 	ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
1269 				     &ftrace_enable_fops,
1270 				     &ftrace_event_filter_fops,
1271 				     &ftrace_event_format_fops);
1272 	mutex_unlock(&event_mutex);
1273 	return ret;
1274 }
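
/*
 * Sketch of dynamic registration as done by in-kernel users that build
 * their event calls at runtime (the names below are hypothetical):
 *
 *	my_call.name  = "my_event";
 *	my_call.class = &my_class;	(supplies raw_init/reg/define_fields)
 *	ret = trace_add_event_call(&my_call);
 *	...
 *	trace_remove_event_call(&my_call);
 */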
1275 
1276 static void remove_subsystem_dir(const char *name)
1277 {
1278 	struct event_subsystem *system;
1279 
1280 	if (strcmp(name, TRACE_SYSTEM) == 0)
1281 		return;
1282 
1283 	list_for_each_entry(system, &event_subsystems, list) {
1284 		if (strcmp(system->name, name) == 0) {
1285 			if (!--system->nr_events) {
1286 				debugfs_remove_recursive(system->entry);
1287 				list_del(&system->list);
1288 				__put_system(system);
1289 			}
1290 			break;
1291 		}
1292 	}
1293 }
1294 
1295 /*
1296  * Must be called with event_mutex held and trace_event_mutex held for writing.
1297  */
1298 static void __trace_remove_event_call(struct ftrace_event_call *call)
1299 {
1300 	event_remove(call);
1301 	trace_destroy_fields(call);
1302 	destroy_preds(call);
1303 	debugfs_remove_recursive(call->dir);
1304 	remove_subsystem_dir(call->class->system);
1305 }
1306 
1307 /* Remove an event_call */
1308 void trace_remove_event_call(struct ftrace_event_call *call)
1309 {
1310 	mutex_lock(&event_mutex);
1311 	down_write(&trace_event_mutex);
1312 	__trace_remove_event_call(call);
1313 	up_write(&trace_event_mutex);
1314 	mutex_unlock(&event_mutex);
1315 }
1316 
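/*
 * Walk an array of ftrace_event_call pointers, such as the built-in
 * __start_ftrace_events/__stop_ftrace_events section below or a module's
 * mod->trace_events array.
 */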
1317 #define for_each_event(event, start, end)			\
1318 	for (event = start;					\
1319 	     (unsigned long)event < (unsigned long)end;		\
1320 	     event++)
1321 
1322 #ifdef CONFIG_MODULES
1323 
1324 static LIST_HEAD(ftrace_module_file_list);
1325 
1326 /*
1327  * Modules must own their file_operations to keep up with
1328  * reference counting.
1329  */
1330 struct ftrace_module_file_ops {
1331 	struct list_head		list;
1332 	struct module			*mod;
1333 	struct file_operations		id;
1334 	struct file_operations		enable;
1335 	struct file_operations		format;
1336 	struct file_operations		filter;
1337 };
1338 
1339 static struct ftrace_module_file_ops *
1340 trace_create_file_ops(struct module *mod)
1341 {
1342 	struct ftrace_module_file_ops *file_ops;
1343 
1344 	/*
1345 	 * This is a bit of a PITA. To allow for correct reference
1346 	 * counting, modules must "own" their file_operations.
1347 	 * To do this, we allocate the file operations that will be
1348 	 * used in the event directory.
1349 	 */
1350 
1351 	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1352 	if (!file_ops)
1353 		return NULL;
1354 
1355 	file_ops->mod = mod;
1356 
1357 	file_ops->id = ftrace_event_id_fops;
1358 	file_ops->id.owner = mod;
1359 
1360 	file_ops->enable = ftrace_enable_fops;
1361 	file_ops->enable.owner = mod;
1362 
1363 	file_ops->filter = ftrace_event_filter_fops;
1364 	file_ops->filter.owner = mod;
1365 
1366 	file_ops->format = ftrace_event_format_fops;
1367 	file_ops->format.owner = mod;
1368 
1369 	list_add(&file_ops->list, &ftrace_module_file_list);
1370 
1371 	return file_ops;
1372 }
1373 
1374 static void trace_module_add_events(struct module *mod)
1375 {
1376 	struct ftrace_module_file_ops *file_ops = NULL;
1377 	struct ftrace_event_call **call, **start, **end;
1378 
1379 	start = mod->trace_events;
1380 	end = mod->trace_events + mod->num_trace_events;
1381 
1382 	if (start == end)
1383 		return;
1384 
1385 	file_ops = trace_create_file_ops(mod);
1386 	if (!file_ops)
1387 		return;
1388 
1389 	for_each_event(call, start, end) {
1390 		__trace_add_event_call(*call, mod,
1391 				       &file_ops->id, &file_ops->enable,
1392 				       &file_ops->filter, &file_ops->format);
1393 	}
1394 }
1395 
1396 static void trace_module_remove_events(struct module *mod)
1397 {
1398 	struct ftrace_module_file_ops *file_ops;
1399 	struct ftrace_event_call *call, *p;
1400 	bool found = false;
1401 
1402 	down_write(&trace_event_mutex);
1403 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
1404 		if (call->mod == mod) {
1405 			found = true;
1406 			__trace_remove_event_call(call);
1407 		}
1408 	}
1409 
1410 	/* Now free the file_operations */
1411 	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1412 		if (file_ops->mod == mod)
1413 			break;
1414 	}
1415 	if (&file_ops->list != &ftrace_module_file_list) {
1416 		list_del(&file_ops->list);
1417 		kfree(file_ops);
1418 	}
1419 
1420 	/*
1421 	 * It is safest to reset the ring buffer if the module being unloaded
1422 	 * registered any events.
1423 	 */
1424 	if (found)
1425 		tracing_reset_current_online_cpus();
1426 	up_write(&trace_event_mutex);
1427 }
1428 
1429 static int trace_module_notify(struct notifier_block *self,
1430 			       unsigned long val, void *data)
1431 {
1432 	struct module *mod = data;
1433 
1434 	mutex_lock(&event_mutex);
1435 	switch (val) {
1436 	case MODULE_STATE_COMING:
1437 		trace_module_add_events(mod);
1438 		break;
1439 	case MODULE_STATE_GOING:
1440 		trace_module_remove_events(mod);
1441 		break;
1442 	}
1443 	mutex_unlock(&event_mutex);
1444 
1445 	return 0;
1446 }
1447 #else
1448 static int trace_module_notify(struct notifier_block *self,
1449 			       unsigned long val, void *data)
1450 {
1451 	return 0;
1452 }
1453 #endif /* CONFIG_MODULES */
1454 
1455 static struct notifier_block trace_module_nb = {
1456 	.notifier_call = trace_module_notify,
1457 	.priority = 0,
1458 };
1459 
1460 extern struct ftrace_event_call *__start_ftrace_events[];
1461 extern struct ftrace_event_call *__stop_ftrace_events[];
1462 
1463 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1464 
1465 static __init int setup_trace_event(char *str)
1466 {
1467 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1468 	ring_buffer_expanded = 1;
1469 	tracing_selftest_disabled = 1;
1470 
1471 	return 1;
1472 }
1473 __setup("trace_event=", setup_trace_event);
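
/*
 * Boot-time usage: a comma-separated list of the same expressions that
 * "set_event" accepts, e.g.
 *
 *	trace_event=sched:sched_switch,irq:*
 */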
1474 
1475 static __init int event_trace_enable(void)
1476 {
1477 	struct ftrace_event_call **iter, *call;
1478 	char *buf = bootup_event_buf;
1479 	char *token;
1480 	int ret;
1481 
1482 	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1483 
1484 		call = *iter;
1485 		ret = event_init(call);
1486 		if (!ret)
1487 			list_add(&call->list, &ftrace_events);
1488 	}
1489 
1490 	while (true) {
1491 		token = strsep(&buf, ",");
1492 
1493 		if (!token)
1494 			break;
1495 		if (!*token)
1496 			continue;
1497 
1498 		ret = ftrace_set_clr_event(token, 1);
1499 		if (ret)
1500 			pr_warn("Failed to enable trace event: %s\n", token);
1501 	}
1502 
1503 	trace_printk_start_comm();
1504 
1505 	return 0;
1506 }
1507 
1508 static __init int event_trace_init(void)
1509 {
1510 	struct ftrace_event_call *call;
1511 	struct dentry *d_tracer;
1512 	struct dentry *entry;
1513 	struct dentry *d_events;
1514 	int ret;
1515 
1516 	d_tracer = tracing_init_dentry();
1517 	if (!d_tracer)
1518 		return 0;
1519 
1520 	entry = debugfs_create_file("available_events", 0444, d_tracer,
1521 				    NULL, &ftrace_avail_fops);
1522 	if (!entry)
1523 		pr_warning("Could not create debugfs "
1524 			   "'available_events' entry\n");
1525 
1526 	entry = debugfs_create_file("set_event", 0644, d_tracer,
1527 				    NULL, &ftrace_set_event_fops);
1528 	if (!entry)
1529 		pr_warning("Could not create debugfs "
1530 			   "'set_event' entry\n");
1531 
1532 	d_events = event_trace_events_dir();
1533 	if (!d_events)
1534 		return 0;
1535 
1536 	/* ring buffer internal formats */
1537 	trace_create_file("header_page", 0444, d_events,
1538 			  ring_buffer_print_page_header,
1539 			  &ftrace_show_header_fops);
1540 
1541 	trace_create_file("header_event", 0444, d_events,
1542 			  ring_buffer_print_entry_header,
1543 			  &ftrace_show_header_fops);
1544 
1545 	trace_create_file("enable", 0644, d_events,
1546 			  NULL, &ftrace_system_enable_fops);
1547 
1548 	if (trace_define_common_fields())
1549 		pr_warning("tracing: Failed to allocate common fields\n");
1550 
1551 	/*
1552 	 * Early initialization already enabled ftrace events.
1553 	 * Now it's only necessary to create the event directory.
1554 	 */
1555 	list_for_each_entry(call, &ftrace_events, list) {
1556 
1557 		ret = event_create_dir(call, d_events,
1558 				       &ftrace_event_id_fops,
1559 				       &ftrace_enable_fops,
1560 				       &ftrace_event_filter_fops,
1561 				       &ftrace_event_format_fops);
1562 		if (ret < 0)
1563 			event_remove(call);
1564 	}
1565 
1566 	ret = register_module_notifier(&trace_module_nb);
1567 	if (ret)
1568 		pr_warning("Failed to register trace events module notifier\n");
1569 
1570 	return 0;
1571 }
1572 core_initcall(event_trace_enable);
1573 fs_initcall(event_trace_init);
1574 
1575 #ifdef CONFIG_FTRACE_STARTUP_TEST
1576 
1577 static DEFINE_SPINLOCK(test_spinlock);
1578 static DEFINE_SPINLOCK(test_spinlock_irq);
1579 static DEFINE_MUTEX(test_mutex);
1580 
1581 static __init void test_work(struct work_struct *dummy)
1582 {
1583 	spin_lock(&test_spinlock);
1584 	spin_lock_irq(&test_spinlock_irq);
1585 	udelay(1);
1586 	spin_unlock_irq(&test_spinlock_irq);
1587 	spin_unlock(&test_spinlock);
1588 
1589 	mutex_lock(&test_mutex);
1590 	msleep(1);
1591 	mutex_unlock(&test_mutex);
1592 }
1593 
1594 static __init int event_test_thread(void *unused)
1595 {
1596 	void *test_malloc;
1597 
1598 	test_malloc = kmalloc(1234, GFP_KERNEL);
1599 	if (!test_malloc)
1600 		pr_info("failed to kmalloc\n");
1601 
1602 	schedule_on_each_cpu(test_work);
1603 
1604 	kfree(test_malloc);
1605 
1606 	set_current_state(TASK_INTERRUPTIBLE);
1607 	while (!kthread_should_stop())
1608 		schedule();
1609 
1610 	return 0;
1611 }
1612 
1613 /*
1614  * Do various things that may trigger events.
1615  */
1616 static __init void event_test_stuff(void)
1617 {
1618 	struct task_struct *test_thread;
1619 
1620 	test_thread = kthread_run(event_test_thread, NULL, "test-events");
1621 	msleep(1);
1622 	kthread_stop(test_thread);
1623 }
1624 
1625 /*
1626  * For every trace event defined, we will test each trace point separately,
1627  * and then by groups, and finally all trace points.
1628  */
1629 static __init void event_trace_self_tests(void)
1630 {
1631 	struct ftrace_event_call *call;
1632 	struct event_subsystem *system;
1633 	int ret;
1634 
1635 	pr_info("Running tests on trace events:\n");
1636 
1637 	list_for_each_entry(call, &ftrace_events, list) {
1638 
1639 		/* Only test those that have a probe */
1640 		if (!call->class || !call->class->probe)
1641 			continue;
1642 
1643 /*
1644  * Testing syscall events here is pretty useless, but
1645  * we still do it if configured, even though it is time consuming.
1646  * What we really need is a user thread to perform the
1647  * syscalls as we test.
1648  */
1649 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
1650 		if (call->class->system &&
1651 		    strcmp(call->class->system, "syscalls") == 0)
1652 			continue;
1653 #endif
1654 
1655 		pr_info("Testing event %s: ", call->name);
1656 
1657 		/*
1658 		 * If an event is already enabled, someone is using
1659 		 * it and the self test should not be on.
1660 		 */
1661 		if (call->flags & TRACE_EVENT_FL_ENABLED) {
1662 			pr_warning("Enabled event during self test!\n");
1663 			WARN_ON_ONCE(1);
1664 			continue;
1665 		}
1666 
1667 		ftrace_event_enable_disable(call, 1);
1668 		event_test_stuff();
1669 		ftrace_event_enable_disable(call, 0);
1670 
1671 		pr_cont("OK\n");
1672 	}
1673 
1674 	/* Now test at the sub system level */
1675 
1676 	pr_info("Running tests on trace event systems:\n");
1677 
1678 	list_for_each_entry(system, &event_subsystems, list) {
1679 
1680 		/* the ftrace system is special, skip it */
1681 		if (strcmp(system->name, "ftrace") == 0)
1682 			continue;
1683 
1684 		pr_info("Testing event system %s: ", system->name);
1685 
1686 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
1687 		if (WARN_ON_ONCE(ret)) {
1688 			pr_warning("error enabling system %s\n",
1689 				   system->name);
1690 			continue;
1691 		}
1692 
1693 		event_test_stuff();
1694 
1695 		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
1696 		if (WARN_ON_ONCE(ret)) {
1697 			pr_warning("error disabling system %s\n",
1698 				   system->name);
1699 			continue;
1700 		}
1701 
1702 		pr_cont("OK\n");
1703 	}
1704 
1705 	/* Test with all events enabled */
1706 
1707 	pr_info("Running tests on all trace events:\n");
1708 	pr_info("Testing all events: ");
1709 
1710 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
1711 	if (WARN_ON_ONCE(ret)) {
1712 		pr_warning("error enabling all events\n");
1713 		return;
1714 	}
1715 
1716 	event_test_stuff();
1717 
1718 	/* now disable all events again */
1719 	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
1720 	if (WARN_ON_ONCE(ret)) {
1721 		pr_warning("error disabling all events\n");
1722 		return;
1723 	}
1724 
1725 	pr_cont("OK\n");
1726 }
1727 
1728 #ifdef CONFIG_FUNCTION_TRACER
1729 
1730 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
1731 
1732 static void
1733 function_test_events_call(unsigned long ip, unsigned long parent_ip,
1734 			  struct ftrace_ops *op, struct pt_regs *pt_regs)
1735 {
1736 	struct ring_buffer_event *event;
1737 	struct ring_buffer *buffer;
1738 	struct ftrace_entry *entry;
1739 	unsigned long flags;
1740 	long disabled;
1741 	int cpu;
1742 	int pc;
1743 
1744 	pc = preempt_count();
1745 	preempt_disable_notrace();
1746 	cpu = raw_smp_processor_id();
1747 	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
1748 
1749 	if (disabled != 1)
1750 		goto out;
1751 
1752 	local_save_flags(flags);
1753 
1754 	event = trace_current_buffer_lock_reserve(&buffer,
1755 						  TRACE_FN, sizeof(*entry),
1756 						  flags, pc);
1757 	if (!event)
1758 		goto out;
1759 	entry	= ring_buffer_event_data(event);
1760 	entry->ip			= ip;
1761 	entry->parent_ip		= parent_ip;
1762 
1763 	trace_buffer_unlock_commit(buffer, event, flags, pc);
1764 
1765  out:
1766 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
1767 	preempt_enable_notrace();
1768 }
1769 
1770 static struct ftrace_ops trace_ops __initdata  =
1771 {
1772 	.func = function_test_events_call,
1773 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
1774 };
1775 
1776 static __init void event_trace_self_test_with_function(void)
1777 {
1778 	int ret;
1779 	ret = register_ftrace_function(&trace_ops);
1780 	if (WARN_ON(ret < 0)) {
1781 		pr_info("Failed to enable function tracer for event tests\n");
1782 		return;
1783 	}
1784 	pr_info("Running tests again, along with the function tracer\n");
1785 	event_trace_self_tests();
1786 	unregister_ftrace_function(&trace_ops);
1787 }
1788 #else
1789 static __init void event_trace_self_test_with_function(void)
1790 {
1791 }
1792 #endif
1793 
1794 static __init int event_trace_self_tests_init(void)
1795 {
1796 	if (!tracing_selftest_disabled) {
1797 		event_trace_self_tests();
1798 		event_trace_self_test_with_function();
1799 	}
1800 
1801 	return 0;
1802 }
1803 
1804 late_initcall(event_trace_self_tests_init);
1805 
1806 #endif
1807