/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
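
/*
 * Example (illustrative, not code from this file): when this tracer is
 * active, the options above appear as runtime toggles in tracefs, e.g.:
 *
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 *	echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * (Paths assume tracefs mounted at /sys/kernel/tracing; it is often
 * also reachable via /sys/kernel/debug/tracing.)
 */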

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs.  The following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used.  To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH) so that when ftrace sees a
	 * negative index it will ignore the record.  The index is recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, and recording then continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
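
/*
 * Illustrative sketch (not code from this file) of the shadow return
 * stack managed above: each traced call pushes one entry and
 * curr_ret_stack indexes the top.  A function filtered by
 * set_graph_notrace still occupies a slot, but the index is biased
 * negative by FTRACE_NOTRACE_DEPTH until that function returns:
 *
 *	ret_stack[2] = { ret, func, calltime, ... }  <- curr_ret_stack == 2
 *	ret_stack[1] = { ... }
 *	ret_stack[0] = { ... }
 */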

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 compiled with -Os (optimize for size) makes
	 * the latest gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
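
/*
 * Rough sketch of the per-arch contract (illustrative only; details
 * vary by architecture): the traced function "returns" into a small
 * assembly trampoline, conventionally named return_to_handler, which
 * does roughly:
 *
 *	return_to_handler:
 *		save scratch / return-value registers
 *		call ftrace_return_to_handler(frame_pointer)
 *		restore registers
 *		jump to the address returned above
 */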

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
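
/*
 * Illustrative caller sketch: a stack unwinder keeps one state integer
 * for the whole walk and filters every candidate return address it
 * finds through this helper, e.g.:
 *
 *	int graph_idx = 0;
 *	...
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_ptr);
 */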

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
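
/*
 * Usage note (illustrative): when tracing_thresh is set, the tracer
 * acts as a duration filter -- only return events of functions that
 * ran longer than the threshold are recorded.  For example:
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh	(microseconds)
 *	echo function_graph > /sys/kernel/tracing/current_tracer
 */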

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
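
/*
 * For example (illustrative): pid 1755 running "sshd" renders centered
 * in the 14-character field as "  sshd-1755   " (the comm is truncated
 * to 7 characters above).
 */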


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about its type so the check below
			 * can reject it. Otherwise we can safely copy the
			 * entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
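
/*
 * When an entry event is immediately followed by its matching return
 * event, the pair is merged into a single "leaf" line; otherwise a
 * nested block is opened.  Illustrative output:
 *
 *	 1)   0.535 us    |        kfree();	<- leaf
 *	 1)               |        vfs_read() {	<- nested
 */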

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
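
/*
 * Illustrative output: entering and leaving an interrupt handler shows
 * up in the trace as arrow markers in the DURATION column, e.g.:
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *	...
 *	 1)   <========== |
 */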

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
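
/*
 * Example duration column output (illustrative; the leading mark comes
 * from trace_find_mark(), e.g. '+' flags durations over 10 usecs and
 * '!' over 100 usecs):
 *
 *	 3)   4.276 us   |
 *	 3) + 14.087 us  |
 *	 3) ! 253.375 us |
 */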

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at depth + 1. Since this is a leaf
		 * function, keep the comments at the same depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth, thus protecting us from
	 * losing the RETURN entry.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the return
		 * from a function, so we now want the comments to
		 * display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
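
/*
 * Illustrative note: by default the block above closes with a bare "}".
 * With funcgraph-tail set (or when the matching entry event was lost),
 * the closing brace is followed by a comment naming the function, so
 * the reader does not have to match braces by hand to see which call a
 * deeply nested "}" belongs to.
 */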

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);
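
/*
 * Usage note (illustrative): max_graph_depth limits how many call
 * levels the tracer follows; 0 means no limit.  For example, to trace
 * only the first two levels of each call chain:
 *
 *	echo 2 > /sys/kernel/tracing/max_graph_depth
 */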

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);