xref: /openbmc/linux/kernel/trace/trace_output.c (revision b627b4ed)
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11 
12 #include "trace_output.h"
13 
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE	128
16 
17 static DEFINE_MUTEX(trace_event_mutex);
18 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
19 
20 static int next_event_type = __TRACE_LAST_TYPE + 1;
21 
22 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
23 {
24 	struct trace_seq *s = &iter->seq;
25 	struct trace_entry *entry = iter->ent;
26 	struct bprint_entry *field;
27 	int ret;
28 
29 	trace_assign_type(field, entry);
30 
31 	ret = trace_seq_bprintf(s, field->fmt, field->buf);
32 	if (!ret)
33 		return TRACE_TYPE_PARTIAL_LINE;
34 
35 	return TRACE_TYPE_HANDLED;
36 }
37 
38 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
39 {
40 	struct trace_seq *s = &iter->seq;
41 	struct trace_entry *entry = iter->ent;
42 	struct print_entry *field;
43 	int ret;
44 
45 	trace_assign_type(field, entry);
46 
47 	ret = trace_seq_printf(s, "%s", field->buf);
48 	if (!ret)
49 		return TRACE_TYPE_PARTIAL_LINE;
50 
51 	return TRACE_TYPE_HANDLED;
52 }
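/*
 * Example (illustrative sketch): a tracer's print_line callback might
 * dispatch to the two helpers above when it only wants the printk payload
 * without the usual context prefix. my_print_line() is hypothetical.
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		if (iter->ent->type == TRACE_BPRINT)
 *			return trace_print_bprintk_msg_only(iter);
 *		if (iter->ent->type == TRACE_PRINT)
 *			return trace_print_printk_msg_only(iter);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 */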
53 
54 /**
55  * trace_seq_printf - sequence printing of trace information
56  * @s: trace sequence descriptor
57  * @fmt: printf format string
58  *
59  * The tracer may use either sequence operations or its own
60  * copy to user routines. To simplify formatting of a trace,
61  * trace_seq_printf() is used to store strings into a special
62  * buffer (@s). Then the output may be either used by
63  * the sequencer or pulled into another buffer.
64  */
65 int
66 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
67 {
68 	int len = (PAGE_SIZE - 1) - s->len;
69 	va_list ap;
70 	int ret;
71 
72 	if (!len)
73 		return 0;
74 
75 	va_start(ap, fmt);
76 	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
77 	va_end(ap);
78 
79 	/* If we can't write it all, don't bother writing anything */
80 	if (ret >= len)
81 		return 0;
82 
83 	s->len += ret;
84 
85 	return len;
86 }
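/*
 * Example (illustrative sketch): trace_seq_printf() is normally called from
 * an output handler with its return value checked, so that a full buffer is
 * reported as a partial line. The my_entry layout and my_trace() name below
 * are hypothetical.
 *
 *	struct my_entry { struct trace_entry ent; unsigned long nr; };
 *
 *	static enum print_line_t my_trace(struct trace_iterator *iter, int flags)
 *	{
 *		struct my_entry *field = (struct my_entry *)iter->ent;
 *
 *		if (!trace_seq_printf(&iter->seq, "nr=%lu\n", field->nr))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */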
87 
88 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
89 {
90 	int len = (PAGE_SIZE - 1) - s->len;
91 	int ret;
92 
93 	if (!len)
94 		return 0;
95 
96 	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
97 
98 	/* If we can't write it all, don't bother writing anything */
99 	if (ret >= len)
100 		return 0;
101 
102 	s->len += ret;
103 
104 	return len;
105 }
106 
107 /**
108  * trace_seq_puts - trace sequence printing of simple string
109  * @s: trace sequence descriptor
110  * @str: simple string to record
111  *
112  * The tracer may use either the sequence operations or its own
113  * copy to user routines. This function records a simple string
114  * into a special buffer (@s) for later retrieval by a sequencer
115  * or other mechanism.
116  */
117 int trace_seq_puts(struct trace_seq *s, const char *str)
118 {
119 	int len = strlen(str);
120 
121 	if (len > ((PAGE_SIZE - 1) - s->len))
122 		return 0;
123 
124 	memcpy(s->buffer + s->len, str, len);
125 	s->len += len;
126 
127 	return len;
128 }
129 
130 int trace_seq_putc(struct trace_seq *s, unsigned char c)
131 {
132 	if (s->len >= (PAGE_SIZE - 1))
133 		return 0;
134 
135 	s->buffer[s->len++] = c;
136 
137 	return 1;
138 }
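/*
 * Example (illustrative sketch): for literal strings and single characters
 * the two helpers above avoid the vsnprintf() pass entirely. A handler
 * emitting a fixed separator might do:
 *
 *	if (!trace_seq_puts(s, " => "))
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	if (!trace_seq_putc(s, '\n'))
 *		return TRACE_TYPE_PARTIAL_LINE;
 */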
139 
140 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
141 {
142 	if (len > ((PAGE_SIZE - 1) - s->len))
143 		return 0;
144 
145 	memcpy(s->buffer + s->len, mem, len);
146 	s->len += len;
147 
148 	return len;
149 }
150 
151 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
152 {
153 	unsigned char hex[HEX_CHARS];
154 	const unsigned char *data = mem;
155 	int i, j;
156 
157 #ifdef __BIG_ENDIAN
158 	for (i = 0, j = 0; i < len; i++) {
159 #else
160 	for (i = len-1, j = 0; i >= 0; i--) {
161 #endif
162 		hex[j++] = hex_asc_hi(data[i]);
163 		hex[j++] = hex_asc_lo(data[i]);
164 	}
165 	hex[j++] = ' ';
166 
167 	return trace_seq_putmem(s, hex, j);
168 }
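/*
 * Worked example: for a 4-byte value 0x12345678 the bytes sit in memory as
 * 78 56 34 12 on a little-endian build, and the loop above walks them from
 * the highest address down (or forward on big-endian), so the text emitted
 * is "12345678 " either way -- the value reads naturally regardless of the
 * raw byte order in memory.
 */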
169 
170 void *trace_seq_reserve(struct trace_seq *s, size_t len)
171 {
172 	void *ret;
173 
174 	if (len > ((PAGE_SIZE - 1) - s->len))
175 		return NULL;
176 
177 	ret = s->buffer + s->len;
178 	s->len += len;
179 
180 	return ret;
181 }
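/*
 * Example (illustrative sketch): trace_seq_reserve() hands out raw space in
 * the buffer for callers that want to fill bytes in place rather than copy
 * them in with trace_seq_putmem(). The "value" variable is hypothetical:
 *
 *	u64 *p = trace_seq_reserve(s, sizeof(*p));
 *	if (!p)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *	*p = value;
 */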
182 
183 int trace_seq_path(struct trace_seq *s, struct path *path)
184 {
185 	unsigned char *p;
186 
187 	if (s->len >= (PAGE_SIZE - 1))
188 		return 0;
189 	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
190 	if (!IS_ERR(p)) {
191 		p = mangle_path(s->buffer + s->len, p, "\n");
192 		if (p) {
193 			s->len = p - s->buffer;
194 			return 1;
195 		}
196 	} else {
197 		s->buffer[s->len++] = '?';
198 		return 1;
199 	}
200 
201 	return 0;
202 }
203 
204 #ifdef CONFIG_KRETPROBES
205 static inline const char *kretprobed(const char *name)
206 {
207 	static const char tramp_name[] = "kretprobe_trampoline";
208 	int size = sizeof(tramp_name);
209 
210 	if (strncmp(tramp_name, name, size) == 0)
211 		return "[unknown/kretprobe'd]";
212 	return name;
213 }
214 #else
215 static inline const char *kretprobed(const char *name)
216 {
217 	return name;
218 }
219 #endif /* CONFIG_KRETPROBES */
220 
221 static int
222 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
223 {
224 #ifdef CONFIG_KALLSYMS
225 	char str[KSYM_SYMBOL_LEN];
226 	const char *name;
227 
228 	kallsyms_lookup(address, NULL, NULL, NULL, str);
229 
230 	name = kretprobed(str);
231 
232 	return trace_seq_printf(s, fmt, name);
233 #endif
234 	return 1;
235 }
236 
237 static int
238 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
239 		     unsigned long address)
240 {
241 #ifdef CONFIG_KALLSYMS
242 	char str[KSYM_SYMBOL_LEN];
243 	const char *name;
244 
245 	sprint_symbol(str, address);
246 	name = kretprobed(str);
247 
248 	return trace_seq_printf(s, fmt, name);
249 #endif
250 	return 1;
251 }
252 
253 #ifndef CONFIG_64BIT
254 # define IP_FMT "%08lx"
255 #else
256 # define IP_FMT "%016lx"
257 #endif
258 
259 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
260 		      unsigned long ip, unsigned long sym_flags)
261 {
262 	struct file *file = NULL;
263 	unsigned long vmstart = 0;
264 	int ret = 1;
265 
266 	if (mm) {
267 		const struct vm_area_struct *vma;
268 
269 		down_read(&mm->mmap_sem);
270 		vma = find_vma(mm, ip);
271 		if (vma) {
272 			file = vma->vm_file;
273 			vmstart = vma->vm_start;
274 		}
275 		if (file) {
276 			ret = trace_seq_path(s, &file->f_path);
277 			if (ret)
278 				ret = trace_seq_printf(s, "[+0x%lx]",
279 						       ip - vmstart);
280 		}
281 		up_read(&mm->mmap_sem);
282 	}
283 	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
284 		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
285 	return ret;
286 }
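/*
 * Example (illustrative, values made up): given a task mm and a matching
 * vma, a user ip is rendered as the backing file plus the offset into that
 * mapping, optionally followed by the raw address when TRACE_ITER_SYM_ADDR
 * is set:
 *
 *	/lib/libc-2.9.so[+0x7a1c0] <00007f3a2c27a1c0>
 *
 * Without an mm, a vma or a backing file, only the "<address>" form is
 * printed.
 */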
287 
288 int
289 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
290 		      unsigned long sym_flags)
291 {
292 	struct mm_struct *mm = NULL;
293 	int ret = 1;
294 	unsigned int i;
295 
296 	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
297 		struct task_struct *task;
298 		/*
299 		 * we do the lookup on the thread group leader,
300 		 * since individual threads might have already quit!
301 		 */
302 		rcu_read_lock();
303 		task = find_task_by_vpid(entry->ent.tgid);
304 		if (task)
305 			mm = get_task_mm(task);
306 		rcu_read_unlock();
307 	}
308 
309 	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
310 		unsigned long ip = entry->caller[i];
311 
312 		if (ip == ULONG_MAX || !ret)
313 			break;
314 		if (i && ret)
315 			ret = trace_seq_puts(s, " <- ");
316 		if (!ip) {
317 			if (ret)
318 				ret = trace_seq_puts(s, "??");
319 			continue;
320 		}
321 		if (!ret)
322 			break;
323 		if (ret)
324 			ret = seq_print_user_ip(s, mm, ip, sym_flags);
325 	}
326 
327 	if (mm)
328 		mmput(mm);
329 	return ret;
330 }
331 
332 int
333 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
334 {
335 	int ret;
336 
337 	if (!ip)
338 		return trace_seq_printf(s, "0");
339 
340 	if (sym_flags & TRACE_ITER_SYM_OFFSET)
341 		ret = seq_print_sym_offset(s, "%s", ip);
342 	else
343 		ret = seq_print_sym_short(s, "%s", ip);
344 
345 	if (!ret)
346 		return 0;
347 
348 	if (sym_flags & TRACE_ITER_SYM_ADDR)
349 		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
350 	return ret;
351 }
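/*
 * Example (illustrative, symbol and address made up): the same kernel ip
 * can be rendered three ways depending on sym_flags:
 *
 *	schedule				(default)
 *	schedule+0x10c/0x2c0			(TRACE_ITER_SYM_OFFSET)
 *	schedule <ffffffff8052a1b0>		(TRACE_ITER_SYM_ADDR)
 *
 * and an ip of zero is printed as "0".
 */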
352 
353 static int
354 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
355 {
356 	int hardirq, softirq;
357 	char comm[TASK_COMM_LEN];
358 
359 	trace_find_cmdline(entry->pid, comm);
360 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
361 	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
362 
363 	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
364 			      comm, entry->pid, cpu,
365 			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
366 				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
367 				  'X' : '.',
368 			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
369 				'N' : '.',
370 			      (hardirq && softirq) ? 'H' :
371 				hardirq ? 'h' : softirq ? 's' : '.'))
372 		return 0;
373 
374 	if (entry->preempt_count)
375 		return trace_seq_printf(s, "%x", entry->preempt_count);
376 	return trace_seq_puts(s, ".");
377 }
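/*
 * Example (illustrative, field widths trimmed): a task "sshd", pid 1234, on
 * cpu 2, with irqs off, need-resched set, in hardirq context and a preempt
 * count of 1 comes out as
 *
 *	sshd-1234  2dNh1
 *
 * i.e. irqs-off ('d', or 'X' if the arch cannot report it), need-resched
 * ('N'), irq context ('h'/'s'/'H'), then the preempt depth or '.' when it
 * is zero.
 */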
378 
379 static unsigned long preempt_mark_thresh = 100;
380 
381 static int
382 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
383 		    unsigned long rel_usecs)
384 {
385 	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
386 				rel_usecs > preempt_mark_thresh ? '!' :
387 				  rel_usecs > 1 ? '+' : ' ');
388 }
389 
390 int trace_print_context(struct trace_iterator *iter)
391 {
392 	struct trace_seq *s = &iter->seq;
393 	struct trace_entry *entry = iter->ent;
394 	unsigned long long t = ns2usecs(iter->ts);
395 	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
396 	unsigned long secs = (unsigned long)t;
397 	char comm[TASK_COMM_LEN];
398 
399 	trace_find_cmdline(entry->pid, comm);
400 
401 	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
402 				comm, entry->pid, iter->cpu, secs, usec_rem);
403 }
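/*
 * Example (illustrative): the default per-entry prefix built above looks
 * like
 *
 *	            bash-2153  [001]  1618.246153:
 *
 * i.e. comm-pid, the cpu in brackets, then the timestamp split into seconds
 * and microseconds.
 */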
404 
405 int trace_print_lat_context(struct trace_iterator *iter)
406 {
407 	u64 next_ts;
408 	int ret;
409 	struct trace_seq *s = &iter->seq;
410 	struct trace_entry *entry = iter->ent,
411 			   *next_entry = trace_find_next_entry(iter, NULL,
412 							       &next_ts);
413 	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
414 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
415 	unsigned long rel_usecs;
416 
417 	if (!next_entry)
418 		next_ts = iter->ts;
419 	rel_usecs = ns2usecs(next_ts - iter->ts);
420 
421 	if (verbose) {
422 		char comm[TASK_COMM_LEN];
423 
424 		trace_find_cmdline(entry->pid, comm);
425 
426 		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
427 				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
428 				       entry->pid, iter->cpu, entry->flags,
429 				       entry->preempt_count, iter->idx,
430 				       ns2usecs(iter->ts),
431 				       abs_usecs / USEC_PER_MSEC,
432 				       abs_usecs % USEC_PER_MSEC,
433 				       rel_usecs / USEC_PER_MSEC,
434 				       rel_usecs % USEC_PER_MSEC);
435 	} else {
436 		ret = lat_print_generic(s, entry, iter->cpu);
437 		if (ret)
438 			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
439 	}
440 
441 	return ret;
442 }
443 
444 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
445 
446 static int task_state_char(unsigned long state)
447 {
448 	int bit = state ? __ffs(state) + 1 : 0;
449 
450 	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
451 }
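/*
 * Worked example: TASK_RUNNING (0) has no bit set, so bit stays 0 and maps
 * to 'R'; TASK_INTERRUPTIBLE (0x1) gives __ffs(1) + 1 = 1 -> 'S';
 * TASK_UNINTERRUPTIBLE (0x2) gives __ffs(2) + 1 = 2 -> 'D'. Anything past
 * the end of TASK_STATE_TO_CHAR_STR falls back to '?'.
 */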
452 
453 /**
454  * ftrace_find_event - find a registered event
455  * @type: the type of event to look for
456  *
457  * Returns an event of type @type, otherwise NULL
458  */
459 struct trace_event *ftrace_find_event(int type)
460 {
461 	struct trace_event *event;
462 	struct hlist_node *n;
463 	unsigned key;
464 
465 	key = type & (EVENT_HASHSIZE - 1);
466 
467 	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
468 		if (event->type == type)
469 			return event;
470 	}
471 
472 	return NULL;
473 }
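/*
 * Example: EVENT_HASHSIZE is a power of two, so the bucket is just the low
 * bits of the type -- e.g. type 200 lands in bucket 200 & 127 = 72. The
 * RCU list primitives let print-time lookups walk a bucket without taking
 * trace_event_mutex; only writers serialize on it.
 */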
474 
475 /**
476  * register_ftrace_event - register output for an event type
477  * @event: the event type to register
478  *
479  * Event types are stored in a hash and this hash is used to
480  * find a way to print an event. If the @event->type is set
481  * then it will use that type, otherwise it will assign a
482  * type to use.
483  *
484  * If you assign your own type, please make sure it is added
485  * to the trace_type enum in trace.h, to avoid collisions
486  * with the dynamic types.
487  *
488  * Returns the event type number or zero on error.
489  */
490 int register_ftrace_event(struct trace_event *event)
491 {
492 	unsigned key;
493 	int ret = 0;
494 
495 	mutex_lock(&trace_event_mutex);
496 
497 	if (!event) {
498 		ret = next_event_type++;
499 		goto out;
500 	}
501 
502 	if (!event->type)
503 		event->type = next_event_type++;
504 	else if (event->type > __TRACE_LAST_TYPE) {
505 		printk(KERN_WARNING "Need to add type to trace.h\n");
506 		WARN_ON(1);
507 	}
508 
509 	if (ftrace_find_event(event->type))
510 		goto out;
511 
512 	if (event->trace == NULL)
513 		event->trace = trace_nop_print;
514 	if (event->raw == NULL)
515 		event->raw = trace_nop_print;
516 	if (event->hex == NULL)
517 		event->hex = trace_nop_print;
518 	if (event->binary == NULL)
519 		event->binary = trace_nop_print;
520 
521 	key = event->type & (EVENT_HASHSIZE - 1);
522 
523 	hlist_add_head_rcu(&event->node, &event_hash[key]);
524 
525 	ret = event->type;
526  out:
527 	mutex_unlock(&trace_event_mutex);
528 
529 	return ret;
530 }
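/*
 * Example (illustrative sketch): a tracer with its own entry type registers
 * an output handler like the ones further below. TRACE_MY_EVENT and
 * my_event_trace() are hypothetical; leaving .type at zero asks this
 * function to hand out a dynamic type, and any NULL callbacks default to
 * trace_nop_print.
 *
 *	static struct trace_event my_event = {
 *		.type	= TRACE_MY_EVENT,
 *		.trace	= my_event_trace,
 *	};
 *
 *	if (!register_ftrace_event(&my_event))
 *		printk(KERN_WARNING "my_event failed to register\n");
 */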
531 
532 /**
533  * unregister_ftrace_event - remove a no longer used event
534  * @event: the event to remove
535  */
536 int unregister_ftrace_event(struct trace_event *event)
537 {
538 	mutex_lock(&trace_event_mutex);
539 	hlist_del(&event->node);
540 	mutex_unlock(&trace_event_mutex);
541 
542 	return 0;
543 }
544 
545 /*
546  * Standard events
547  */
548 
549 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
550 {
551 	return TRACE_TYPE_HANDLED;
552 }
553 
554 /* TRACE_FN */
555 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
556 {
557 	struct ftrace_entry *field;
558 	struct trace_seq *s = &iter->seq;
559 
560 	trace_assign_type(field, iter->ent);
561 
562 	if (!seq_print_ip_sym(s, field->ip, flags))
563 		goto partial;
564 
565 	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
566 		if (!trace_seq_printf(s, " <-"))
567 			goto partial;
568 		if (!seq_print_ip_sym(s,
569 				      field->parent_ip,
570 				      flags))
571 			goto partial;
572 	}
573 	if (!trace_seq_printf(s, "\n"))
574 		goto partial;
575 
576 	return TRACE_TYPE_HANDLED;
577 
578  partial:
579 	return TRACE_TYPE_PARTIAL_LINE;
580 }
581 
582 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
583 {
584 	struct ftrace_entry *field;
585 
586 	trace_assign_type(field, iter->ent);
587 
588 	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
589 			      field->ip,
590 			      field->parent_ip))
591 		return TRACE_TYPE_PARTIAL_LINE;
592 
593 	return TRACE_TYPE_HANDLED;
594 }
595 
596 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
597 {
598 	struct ftrace_entry *field;
599 	struct trace_seq *s = &iter->seq;
600 
601 	trace_assign_type(field, iter->ent);
602 
603 	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
604 	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
605 
606 	return TRACE_TYPE_HANDLED;
607 }
608 
609 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
610 {
611 	struct ftrace_entry *field;
612 	struct trace_seq *s = &iter->seq;
613 
614 	trace_assign_type(field, iter->ent);
615 
616 	SEQ_PUT_FIELD_RET(s, field->ip);
617 	SEQ_PUT_FIELD_RET(s, field->parent_ip);
618 
619 	return TRACE_TYPE_HANDLED;
620 }
621 
622 static struct trace_event trace_fn_event = {
623 	.type		= TRACE_FN,
624 	.trace		= trace_fn_trace,
625 	.raw		= trace_fn_raw,
626 	.hex		= trace_fn_hex,
627 	.binary		= trace_fn_bin,
628 };
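/*
 * Example (illustrative, symbols and addresses made up): for one function
 * entry the four handlers above emit roughly
 *
 *	.trace:		do_IRQ <-ret_from_intr
 *	.raw:		c01053a6 c010541e
 *	.hex:		the same two values as hex field dumps
 *	.binary:	the ip/parent_ip fields copied verbatim
 */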
629 
630 /* TRACE_CTX and TRACE_WAKE */
631 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
632 					     char *delim)
633 {
634 	struct ctx_switch_entry *field;
635 	char comm[TASK_COMM_LEN];
636 	int S, T;
637 
638 
639 	trace_assign_type(field, iter->ent);
640 
641 	T = task_state_char(field->next_state);
642 	S = task_state_char(field->prev_state);
643 	trace_find_cmdline(field->next_pid, comm);
644 	if (!trace_seq_printf(&iter->seq,
645 			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
646 			      field->prev_pid,
647 			      field->prev_prio,
648 			      S, delim,
649 			      field->next_cpu,
650 			      field->next_pid,
651 			      field->next_prio,
652 			      T, comm))
653 		return TRACE_TYPE_PARTIAL_LINE;
654 
655 	return TRACE_TYPE_HANDLED;
656 }
657 
658 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
659 {
660 	return trace_ctxwake_print(iter, "==>");
661 }
662 
663 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
664 					  int flags)
665 {
666 	return trace_ctxwake_print(iter, "  +");
667 }
668 
669 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
670 {
671 	struct ctx_switch_entry *field;
672 	int T;
673 
674 	trace_assign_type(field, iter->ent);
675 
676 	if (!S)
677 		S = task_state_char(field->prev_state);
678 	T = task_state_char(field->next_state);
679 	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
680 			      field->prev_pid,
681 			      field->prev_prio,
682 			      S,
683 			      field->next_cpu,
684 			      field->next_pid,
685 			      field->next_prio,
686 			      T))
687 		return TRACE_TYPE_PARTIAL_LINE;
688 
689 	return TRACE_TYPE_HANDLED;
690 }
691 
692 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
693 {
694 	return trace_ctxwake_raw(iter, 0);
695 }
696 
697 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
698 {
699 	return trace_ctxwake_raw(iter, '+');
700 }
701 
702 
703 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
704 {
705 	struct ctx_switch_entry *field;
706 	struct trace_seq *s = &iter->seq;
707 	int T;
708 
709 	trace_assign_type(field, iter->ent);
710 
711 	if (!S)
712 		S = task_state_char(field->prev_state);
713 	T = task_state_char(field->next_state);
714 
715 	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
716 	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
717 	SEQ_PUT_HEX_FIELD_RET(s, S);
718 	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
719 	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
720 	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
721 	SEQ_PUT_HEX_FIELD_RET(s, T);
722 
723 	return TRACE_TYPE_HANDLED;
724 }
725 
726 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
727 {
728 	return trace_ctxwake_hex(iter, 0);
729 }
730 
731 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
732 {
733 	return trace_ctxwake_hex(iter, '+');
734 }
735 
736 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
737 					   int flags)
738 {
739 	struct ctx_switch_entry *field;
740 	struct trace_seq *s = &iter->seq;
741 
742 	trace_assign_type(field, iter->ent);
743 
744 	SEQ_PUT_FIELD_RET(s, field->prev_pid);
745 	SEQ_PUT_FIELD_RET(s, field->prev_prio);
746 	SEQ_PUT_FIELD_RET(s, field->prev_state);
747 	SEQ_PUT_FIELD_RET(s, field->next_pid);
748 	SEQ_PUT_FIELD_RET(s, field->next_prio);
749 	SEQ_PUT_FIELD_RET(s, field->next_state);
750 
751 	return TRACE_TYPE_HANDLED;
752 }
753 
754 static struct trace_event trace_ctx_event = {
755 	.type		= TRACE_CTX,
756 	.trace		= trace_ctx_print,
757 	.raw		= trace_ctx_raw,
758 	.hex		= trace_ctx_hex,
759 	.binary		= trace_ctxwake_bin,
760 };
761 
762 static struct trace_event trace_wake_event = {
763 	.type		= TRACE_WAKE,
764 	.trace		= trace_wake_print,
765 	.raw		= trace_wake_raw,
766 	.hex		= trace_wake_hex,
767 	.binary		= trace_ctxwake_bin,
768 };
769 
770 /* TRACE_SPECIAL */
771 static enum print_line_t trace_special_print(struct trace_iterator *iter,
772 					     int flags)
773 {
774 	struct special_entry *field;
775 
776 	trace_assign_type(field, iter->ent);
777 
778 	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
779 			      field->arg1,
780 			      field->arg2,
781 			      field->arg3))
782 		return TRACE_TYPE_PARTIAL_LINE;
783 
784 	return TRACE_TYPE_HANDLED;
785 }
786 
787 static enum print_line_t trace_special_hex(struct trace_iterator *iter,
788 					   int flags)
789 {
790 	struct special_entry *field;
791 	struct trace_seq *s = &iter->seq;
792 
793 	trace_assign_type(field, iter->ent);
794 
795 	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
796 	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
797 	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
798 
799 	return TRACE_TYPE_HANDLED;
800 }
801 
802 static enum print_line_t trace_special_bin(struct trace_iterator *iter,
803 					   int flags)
804 {
805 	struct special_entry *field;
806 	struct trace_seq *s = &iter->seq;
807 
808 	trace_assign_type(field, iter->ent);
809 
810 	SEQ_PUT_FIELD_RET(s, field->arg1);
811 	SEQ_PUT_FIELD_RET(s, field->arg2);
812 	SEQ_PUT_FIELD_RET(s, field->arg3);
813 
814 	return TRACE_TYPE_HANDLED;
815 }
816 
817 static struct trace_event trace_special_event = {
818 	.type		= TRACE_SPECIAL,
819 	.trace		= trace_special_print,
820 	.raw		= trace_special_print,
821 	.hex		= trace_special_hex,
822 	.binary		= trace_special_bin,
823 };
824 
825 /* TRACE_STACK */
826 
827 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
828 					   int flags)
829 {
830 	struct stack_entry *field;
831 	struct trace_seq *s = &iter->seq;
832 	int i;
833 
834 	trace_assign_type(field, iter->ent);
835 
836 	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
837 		if (i) {
838 			if (!trace_seq_puts(s, " <= "))
839 				goto partial;
840 		}
841 		if (!seq_print_ip_sym(s, field->caller[i], flags))
842 			goto partial;
843 	}
844 
845 	if (!trace_seq_puts(s, "\n"))
846 		goto partial;
847 
848 	return TRACE_TYPE_HANDLED;
849 
850  partial:
851 	return TRACE_TYPE_PARTIAL_LINE;
852 }
853 
854 static struct trace_event trace_stack_event = {
855 	.type		= TRACE_STACK,
856 	.trace		= trace_stack_print,
857 	.raw		= trace_special_print,
858 	.hex		= trace_special_hex,
859 	.binary		= trace_special_bin,
860 };
861 
862 /* TRACE_USER_STACK */
863 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
864 						int flags)
865 {
866 	struct userstack_entry *field;
867 	struct trace_seq *s = &iter->seq;
868 
869 	trace_assign_type(field, iter->ent);
870 
871 	if (!seq_print_userip_objs(field, s, flags))
872 		goto partial;
873 
874 	if (!trace_seq_putc(s, '\n'))
875 		goto partial;
876 
877 	return TRACE_TYPE_HANDLED;
878 
879  partial:
880 	return TRACE_TYPE_PARTIAL_LINE;
881 }
882 
883 static struct trace_event trace_user_stack_event = {
884 	.type		= TRACE_USER_STACK,
885 	.trace		= trace_user_stack_print,
886 	.raw		= trace_special_print,
887 	.hex		= trace_special_hex,
888 	.binary		= trace_special_bin,
889 };
890 
891 /* TRACE_BPRINT */
892 static enum print_line_t
893 trace_bprint_print(struct trace_iterator *iter, int flags)
894 {
895 	struct trace_entry *entry = iter->ent;
896 	struct trace_seq *s = &iter->seq;
897 	struct bprint_entry *field;
898 
899 	trace_assign_type(field, entry);
900 
901 	if (!seq_print_ip_sym(s, field->ip, flags))
902 		goto partial;
903 
904 	if (!trace_seq_puts(s, ": "))
905 		goto partial;
906 
907 	if (!trace_seq_bprintf(s, field->fmt, field->buf))
908 		goto partial;
909 
910 	return TRACE_TYPE_HANDLED;
911 
912  partial:
913 	return TRACE_TYPE_PARTIAL_LINE;
914 }
915 
916 
917 static enum print_line_t
918 trace_bprint_raw(struct trace_iterator *iter, int flags)
919 {
920 	struct bprint_entry *field;
921 	struct trace_seq *s = &iter->seq;
922 
923 	trace_assign_type(field, iter->ent);
924 
925 	if (!trace_seq_printf(s, ": %lx : ", field->ip))
926 		goto partial;
927 
928 	if (!trace_seq_bprintf(s, field->fmt, field->buf))
929 		goto partial;
930 
931 	return TRACE_TYPE_HANDLED;
932 
933  partial:
934 	return TRACE_TYPE_PARTIAL_LINE;
935 }
936 
937 
938 static struct trace_event trace_bprint_event = {
939 	.type		= TRACE_BPRINT,
940 	.trace		= trace_bprint_print,
941 	.raw		= trace_bprint_raw,
942 };
943 
944 /* TRACE_PRINT */
945 static enum print_line_t trace_print_print(struct trace_iterator *iter,
946 					   int flags)
947 {
948 	struct print_entry *field;
949 	struct trace_seq *s = &iter->seq;
950 
951 	trace_assign_type(field, iter->ent);
952 
953 	if (!seq_print_ip_sym(s, field->ip, flags))
954 		goto partial;
955 
956 	if (!trace_seq_printf(s, ": %s", field->buf))
957 		goto partial;
958 
959 	return TRACE_TYPE_HANDLED;
960 
961  partial:
962 	return TRACE_TYPE_PARTIAL_LINE;
963 }
964 
965 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
966 {
967 	struct print_entry *field;
968 
969 	trace_assign_type(field, iter->ent);
970 
971 	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
972 		goto partial;
973 
974 	return TRACE_TYPE_HANDLED;
975 
976  partial:
977 	return TRACE_TYPE_PARTIAL_LINE;
978 }
979 
980 static struct trace_event trace_print_event = {
981 	.type	 	= TRACE_PRINT,
982 	.trace		= trace_print_print,
983 	.raw		= trace_print_raw,
984 };
985 
986 
987 static struct trace_event *events[] __initdata = {
988 	&trace_fn_event,
989 	&trace_ctx_event,
990 	&trace_wake_event,
991 	&trace_special_event,
992 	&trace_stack_event,
993 	&trace_user_stack_event,
994 	&trace_bprint_event,
995 	&trace_print_event,
996 	NULL
997 };
998 
999 __init static int init_events(void)
1000 {
1001 	struct trace_event *event;
1002 	int i, ret;
1003 
1004 	for (i = 0; events[i]; i++) {
1005 		event = events[i];
1006 
1007 		ret = register_ftrace_event(event);
1008 		if (!ret) {
1009 			printk(KERN_WARNING "event %d failed to register\n",
1010 			       event->type);
1011 			WARN_ON_ONCE(1);
1012 		}
1013 	}
1014 
1015 	return 0;
1016 }
1017 device_initcall(init_events);
1018