xref: /openbmc/linux/kernel/trace/trace_output.c (revision 3932b9ca)
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_puts(s, field->str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_puts(s, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
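
/*
 * The three *_msg_only() helpers above print just the message body of a
 * trace_puts()/trace_printk() style record, for output modes that want
 * the text without the usual comm/pid/timestamp context prefix.
 */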

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	for (i = 0;  flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for leftover flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
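
/*
 * Illustrative use, as generated by the __print_flags() helper in a
 * TP_printk() format string (flag names here are hypothetical):
 *
 *	__print_flags(rec->flags, "|",
 *		      { FOO_FLAG_A, "A" }, { FOO_FLAG_B, "B" })
 *
 * expands into a call to ftrace_print_flags_seq() with a
 * trace_print_flags array terminated by a NULL name.  Any bits not
 * covered by the table fall through to the "0x%lx" case above.
 */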

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0;  symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
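
/*
 * Illustrative use via __print_symbolic() in a TP_printk() format,
 * with hypothetical names and values:
 *
 *	__print_symbolic(rec->state, { 0, "CLOSED" }, { 1, "OPEN" })
 *
 * An exact match prints that entry's name; a value with no match falls
 * through to the "0x%lx" case above.
 */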

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			 const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0;  symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif

const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
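
/*
 * This backs the __print_bitmask() tracepoint helper.  Note that
 * @bitmask_size is in bytes, hence the "* 8" above to convert to a bit
 * count for trace_seq_bitmask().
 */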

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
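
/*
 * This backs the __print_hex() helper: bytes come out space-separated,
 * e.g. a 4-byte buffer {0xde, 0xad, 0xbe, 0xef} prints as "de ad be ef".
 */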

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
{
	struct ftrace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;
	int ret;

	event = container_of(trace_event, struct ftrace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);

static int ftrace_output_raw(struct trace_iterator *iter, char *name,
			     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	ret = trace_seq_printf(s, "%s: ", name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_vprintf(s, fmt, ap);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = ftrace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_output_call);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
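
/*
 * While a kretprobe is active, the probed function's return address is
 * replaced with the address of kretprobe_trampoline, so a symbol lookup
 * on such an address would name the trampoline rather than the real
 * caller.  kretprobed() above substitutes a placeholder string instead.
 */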

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_putc(s, '\n');
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_putc(s, '0');

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
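
/*
 * Example seq_print_ip_sym() output (symbol and address are
 * hypothetical):
 *
 *	default:		"schedule"
 *	TRACE_ITER_SYM_OFFSET:	"schedule+0x2f/0x7a0"
 *	TRACE_ITER_SYM_ADDR:	appends " <ffffffff816b4b62>"
 */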

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic latency fields: the irqs-off flag, the
 * need-resched flag, hard/soft irq context and the preempt count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}
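
/*
 * Example: the four characters "dNh1" mean interrupts disabled,
 * need-resched and preempt-resched both set, in hardirq context, with a
 * preempt count of 1; "...." means none of the above.
 */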

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh_us = 100;

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		return trace_seq_printf(
				s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
				ns2usecs(iter->ts),
				abs_msec, abs_usec,
				rel_msec, rel_usec);
	} else if (verbose && !in_ns) {
		return trace_seq_printf(
				s, "[%016llx] %lld (+%lld): ",
				iter->ts, abs_ts, rel_ts);
	} else if (!verbose && in_ns) {
		return trace_seq_printf(
				s, " %4lldus%c: ",
				abs_ts,
				rel_ts > preempt_mark_thresh_us ? '!' :
				  rel_ts > 1 ? '+' : ' ');
	} else { /* !verbose && !in_ns */
		return trace_seq_printf(s, " %4lld: ", abs_ts);
	}
}
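
/*
 * Sample timestamps produced above (values are hypothetical):
 *
 *	verbose, ns clock:	"[00043c81] 277.962ms (+0.011ms): "
 *	non-verbose, ns clock:	" 277962us+: "
 *
 * In the non-verbose form the trailing '!' or '+' flags a delta to the
 * next event above preempt_mark_thresh_us or above 1us, respectively.
 */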

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		return trace_seq_printf(s, " %12llu: ", iter->ts);
}
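
/*
 * This builds the familiar per-event prefix seen in the trace file,
 * e.g. (with hypothetical values):
 *
 *	"            bash-1234  [002] d..1   123.456789: "
 */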

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(
				s, "%16s %5d %3d %d %08x %08lx ",
				comm, entry->pid, iter->cpu, entry->flags,
				entry->preempt_count, iter->idx);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
	}

	if (ret)
		ret = lat_print_timestamp(iter, next_ts);

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We have used up all possible max events;
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
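
/*
 * Event type allocation, sketched: types 1..__TRACE_LAST_TYPE are
 * reserved for the static tracers; dynamic events take the next free
 * number up to FTRACE_MAX_EVENT.  Once the counter runs out,
 * trace_search_list() walks the type-ordered registration list looking
 * for a hole left behind by an unregistered event, returning that type
 * (and the list position to insert at), or 0 if every type is taken.
 */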

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event type already in use? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
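
/*
 * Minimal caller sketch, modeled on the static events registered at the
 * bottom of this file (all names here are hypothetical):
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	int type = register_ftrace_event(&my_event);
 *	if (!type)
 *		return -EBUSY;
 *
 * Leaving .type zero asks for a dynamically assigned number; the unset
 * .raw/.hex/.binary callbacks default to trace_nop_print().
 */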

/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_ftrace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_puts(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
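
/*
 * Example TRACE_CTX line produced above (values are hypothetical):
 *
 *	"  1234:120:S ==> [002]  5678:120:R bash"
 *
 * i.e. prev_pid:prev_prio:prev_state, the delimiter ("==>" for a
 * context switch, "  +" for a wakeup), then the next task's CPU,
 * pid:prio:state and comm.
 */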

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_putc(s, '\n'))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_puts(s, field->str))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_puts(s, field->str))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);
1276