// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

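/*
 * Per-cpu bookkeeping used while rendering the output: last_pid lets
 * verif_pid() detect context switches, depth tracks the current
 * nesting level for indentation, depth_irq remembers the depth at
 * which we entered irq code (-1 while outside of it), ignore tells
 * the output code to skip an event that was already printed while
 * recovering from an earlier failed write, and enter_funcs saves
 * entry addresses so returns can be matched against them.
 */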
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
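
/*
 * These options appear in the trace_options tracefs file while this
 * tracer is active; the "no" prefix clears an option. For example
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *   echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * enables the TASK/PID column (off by default, per tracer_flags above)
 * and hides functions called in hard irq context.
 */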

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

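/*
 * Entry handler invoked by the fgraph core for each traced function
 * call. Returning 0 tells the core not to hook this function's
 * return; returning 1 asks for the matching return event as well.
 */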
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		/*
		 * Need to return 1 so that the return handler is called,
		 * which will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

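/*
 * Emit a paired entry/return for a single address, both stamped with
 * the same trace_clock_local() time, so a plain function event can be
 * rendered by the graph output as a zero-duration leaf.
 */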
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;

	trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&funcgraph_thresh_ops);
	else
		ret = register_ftrace_graph(&funcgraph_ops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	if (tracing_thresh)
		unregister_ftrace_graph(&funcgraph_thresh_ops);
	else
		unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

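/*
 * Print the "comm-pid" pair centered in a field of
 * TRACE_GRAPH_PROCINFO_LENGTH characters; e.g. pid 1755 with comm
 * "sshd" comes out as "  sshd-1755   ".
 */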
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* Leading spaces to center the text */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Trailing spaces to center the text */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

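/*
 * Note that do_div() divides its first argument in place and returns
 * the remainder: below, @t ends up holding whole seconds while
 * usecs_rem holds the leftover nanoseconds, which are then scaled
 * down to microseconds. Example rendering: "  105.000123 |  ".
 */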
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->trace_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

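/*
 * Render a nanosecond duration as microseconds in a fixed-width
 * column, e.g. 3125 ns prints as "3.125 us" and 1234567 ns as
 * "1234.567 us"; the fractional digits are dropped once the usecs
 * part alone needs seven or more digits.
 */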
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

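/*
 * When the funcgraph-overhead option is set, print_graph_duration()
 * below prefixes the duration with a mark from trace_find_mark()
 * (trace_output.c). At the time of writing those marks are roughly
 * '+' for > 10 us, '!' for > 100 us, '#' for > 1 ms, '*' for > 10 ms,
 * '@' for > 100 ms and '$' for > 1 s; see trace_output.c for the
 * authoritative table.
 */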
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal the execution time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

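/*
 * Illustrative output: a leaf call (entry immediately followed by its
 * return) collapses to one line carrying the duration, while a nested
 * call opens a brace that print_graph_return() closes later:
 *
 *  1)   1.234 us    |    rcu_read_lock();
 *  1)               |    schedule() {
 */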
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
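
/*
 * Illustrative depth_irq lifecycle (hypothetical depths): outside irq
 * code depth_irq is -1. When an entry whose address lies in the irq
 * text section arrives at, say, depth 3, depth_irq becomes 3 and every
 * deeper event is suppressed. The first return at depth <= 3 resets
 * depth_irq to -1 (see check_irq_return() below) and output resumes.
 */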

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the
		 * return from a function; we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would otherwise never
	 * go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

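/*
 * "max_graph_depth" limits how deep the tracer follows nested calls.
 * For example (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * records at most three levels of nesting; echoing 0 removes the
 * limit again.
 */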
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);