xref: /openbmc/linux/kernel/trace/trace_output.c (revision b6dcefde)
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}
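
/*
 * Example (illustrative sketch, not part of this file): a seq_file
 * ->show() handler handing a filled trace_seq to the seq_file layer.
 * The iterator variable and the call that fills it are hypothetical.
 *
 *	static int s_show(struct seq_file *m, void *v)
 *	{
 *		struct trace_iterator *iter = v;
 *
 *		print_trace_line(iter);
 *		trace_print_seq(m, &iter->seq);
 *		return 0;
 *	}
 */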

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 1 if the string was recorded, or 0 if it did not fit in
 * the buffer's free space.
 *
 * The tracer may use either sequence operations or its own
 * copy-to-user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). The output may then either be used by the
 * sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
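
/*
 * Example (minimal sketch): appending formatted output from an event's
 * print handler and propagating truncation.  The surrounding handler
 * and the ip/cpu values are hypothetical.
 *
 *	struct trace_seq *s = &iter->seq;
 *
 *	if (!trace_seq_printf(s, "cpu=%d ip=%pS\n", iter->cpu, (void *)ip))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	return TRACE_TYPE_HANDLED;
 */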

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 * @args: va_list of the format arguments
 *
 * The tracer may use either sequence operations or its own
 * copy-to-user routines. To simplify formatting of a trace,
 * trace_seq_vprintf() is used to store strings into a special
 * buffer (@s). The output may then either be used by the
 * sequencer or pulled into another buffer.
 *
 * Note that, unlike trace_seq_printf(), on success this returns the
 * buffer space that was free before the write (non-zero), and 0 on
 * overflow.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy-to-user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
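
/*
 * Worked example: on a little-endian machine, hex-dumping a four-byte
 * field holding 0x12345678 walks the bytes in reverse (78 56 34 12 in
 * memory), so the most significant byte is printed first:
 *
 *	u32 val = 0x12345678;
 *
 *	trace_seq_putmem_hex(s, &val, sizeof(val));
 *
 * appends "12345678 " (note the trailing space) to @s.
 */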

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}
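
/*
 * Example (minimal sketch): reserving space in the sequence buffer and
 * filling it in afterwards.  The record type and variable are
 * hypothetical.
 *
 *	void *p = trace_seq_reserve(s, sizeof(struct my_record));
 *
 *	if (!p)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	memcpy(p, &rec, sizeof(struct my_record));
 */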

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0;  flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for leftover flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
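
/*
 * Example (illustrative sketch): decoding a flags word with a
 * "|"-delimited name table.  The flag values below are made up.
 *
 *	static const struct trace_print_flags my_flag_names[] = {
 *		{ 0x10UL,	"WAIT" },
 *		{ 0x20UL,	"IO" },
 *		{ -1,		NULL }
 *	};
 *
 *	ftrace_print_flags_seq(p, "|", flags, my_flag_names);
 *
 * With flags == 0x30 this yields "WAIT|IO" (plus a terminating NUL);
 * any bits not named in the table are appended as a hex value.
 */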

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0;  symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
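
/*
 * Example (illustrative sketch): mapping an enumerated value to a
 * name.  The table entries are hypothetical.
 *
 *	static const struct trace_print_flags state_names[] = {
 *		{ 0,	"IDLE" },
 *		{ 1,	"BUSY" },
 *		{ -1,	NULL }
 *	};
 *
 *	ftrace_print_symbols_seq(p, val, state_names);
 *
 * appends "BUSY" for val == 1, or the value in hex if it is not in
 * the table.
 */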

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lock-depth fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields: irqs off, in hard or soft interrupt,
 * preempt count and lock depth.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, "%c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	if (!ret)
		return 0;

	if (entry->lock_depth < 0)
		return trace_seq_putc(s, '.');

	return trace_seq_printf(s, "%d", entry->lock_depth);
}
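
/*
 * Worked example: an entry logged with interrupts disabled, the
 * need-resched flag set, in softirq context, with preempt_count == 2
 * and lock_depth < 0 comes out as "dNs2." - one character or digit
 * per field, in the order emitted above.
 */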

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
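
/*
 * Worked example: state 0 (TASK_RUNNING) maps to bit 0 and prints the
 * first character of TASK_STATE_TO_CHAR_STR ('R'); TASK_INTERRUPTIBLE
 * (0x1) maps to bit 1 and prints the second ('S').  A state bit beyond
 * the end of the string prints '?'.
 */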

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns the event of type @type, or NULL if none is registered.
 * Must be called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We've used up all the possible event types;
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand event types? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
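
/*
 * Example (sketch): event lookup is only safe under the read lock,
 * mirroring how the trace output path uses it:
 *
 *	trace_event_read_lock();
 *	event = ftrace_find_event(iter->ent->type);
 *	if (event)
 *		ret = event->trace(iter, sym_flags);
 *	trace_event_read_unlock();
 */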

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number, or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event type already in use? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
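
/*
 * Example (illustrative sketch): registering an output handler with a
 * dynamically assigned type.  The handler below is hypothetical; any
 * callbacks left NULL fall back to trace_nop_print().
 *
 *	static enum print_line_t my_trace(struct trace_iterator *iter,
 *					  int flags)
 *	{
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 *	static struct trace_event my_event = {
 *		.trace	= my_trace,
 *	};
 *
 * Leaving .type zero asks register_ftrace_event() to assign a free
 * dynamic type:
 *
 *	int type = register_ftrace_event(&my_event);
 *	if (!type)
 *		... registration failed ...
 */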

/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	int i;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;
	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
			break;
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, field->caller[i], flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);