/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;	/* pid printed on the last trace line */
	int		depth;		/* current depth, used for comment lines */
	int		depth_irq;	/* depth at irq entry, -1 when not in irq */
	int		ignore;		/* skip printing the next event on this cpu */
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];	/* entry funcs, matched on return */
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2	/* spaces per level of call nesting */

static unsigned int max_depth;	/* 0 means no depth limit */

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ marks;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
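
/*
 * The corresponding fill strings in print_graph_duration() are:
 *   FLAGS_FILL_FULL:  "              |  "  (the whole column)
 *   FLAGS_FILL_START: "  "                 (before an IRQ mark)
 *   FLAGS_FILL_END:   " |"                 (after an IRQ mark)
 */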

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack
	 * of the current task.  Its value should be in
	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
	 * used.  To support filtering out specific functions, it makes
	 * the index negative by subtracting a huge value
	 * (FTRACE_NOTRACE_DEPTH) so that when ftrace sees a negative
	 * index it will ignore the record.  The index gets recovered
	 * when returning from the filtered function by adding
	 * FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets incremented
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in debugfs by the user.
	 */
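	/*
	 * A worked example (assuming FTRACE_NOTRACE_DEPTH is 65536,
	 * its usual value): pushing a notrace'd function at index 3
	 * leaves curr_ret_stack at 3 - 65536 = -65533; every check
	 * against a negative index then skips recording, and the pop
	 * path adds 65536 back to recover index 3.
	 */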
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring buffer and return the original
 * return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in, or itself is, an enabled function */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

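/*
 * When tracing_thresh is set, entry events are not written to the
 * ring buffer at all; only trace_graph_thresh_return() below emits a
 * record, and only for calls whose duration meets the threshold.
 */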
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14
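
/*
 * print_graph_proc() emits "comm-pid" centered in a field of the
 * width above, e.g. "  sshd-1755   " (comm is truncated to 7 chars).
 */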

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* Leading spaces to center the text */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Trailing spaces to center the text */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

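/*
 * A "leaf" is an entry event immediately followed by its matching
 * return event (same pid, same function): the function made no traced
 * calls, so both events can be folded into a single "func();" line
 * with a duration.
 */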
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

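/*
 * Print the "==========>" / "<==========" markers that bracket entry
 * into and exit from irq handling in the trace output.
 */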
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

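/*
 * Convert a duration in nanoseconds into a "usecs.nsecs" string;
 * e.g. 3208 ns prints as "3.208 us" plus padding to the column width.
 */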
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
			return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catching here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
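/* e.g. " 1)   0.051 us    |    rcu_read_lock();" (illustrative output) */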
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is a return entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth, thus protecting us from
	 * losing the RETURN entry.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry that would otherwise never
	 * go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

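/*
 * debugfs "max_graph_depth" control: e.g. writing 1 traces only the
 * outermost functions, while writing 0 (the default) removes the limit.
 */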
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%u\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace entry event\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace return event\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);