1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 
15 #include "trace.h"
16 #include "trace_output.h"
17 
18 /* When set, irq functions will be ignored */
19 static int ftrace_graph_skip_irqs;
20 
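/*
 * Per-cpu state used while rendering the graph output: the last pid
 * printed, the current call depth, the depth at which irq code was
 * entered (-1 when outside irq code), a flag telling the output code
 * to skip the next event, and the functions recorded at each depth so
 * that a return can be matched against its entry.
 */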
21 struct fgraph_cpu_data {
22 	pid_t		last_pid;
23 	int		depth;
24 	int		depth_irq;
25 	int		ignore;
26 	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
27 };
28 
29 struct fgraph_data {
30 	struct fgraph_cpu_data __percpu *cpu_data;
31 
32 	/* Place to preserve last processed entry. */
33 	struct ftrace_graph_ent_entry	ent;
34 	struct ftrace_graph_ret_entry	ret;
35 	int				failed;
36 	int				cpu;
37 };
38 
39 #define TRACE_GRAPH_INDENT	2
40 
41 /* Flag options */
42 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
43 #define TRACE_GRAPH_PRINT_CPU		0x2
44 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
45 #define TRACE_GRAPH_PRINT_PROC		0x8
46 #define TRACE_GRAPH_PRINT_DURATION	0x10
47 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
48 #define TRACE_GRAPH_PRINT_IRQS		0x40
49 
50 static unsigned int max_depth;
51 
52 static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
54 	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
59 	/* Display proc name/pid */
60 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
61 	/* Display duration of execution */
62 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
63 	/* Display absolute time of an entry */
64 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
65 	/* Display interrupts */
66 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
67 	{ } /* Empty entry */
68 };
69 
70 static struct tracer_flags tracer_flags = {
71 	/* Don't display overruns and proc by default */
72 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
73 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
74 	.opts = trace_opts
75 };
76 
77 static struct trace_array *graph_array;
78 
79 /*
 * The DURATION column is also used to display IRQ markers. The
 * following values are used by print_graph_irq() and others to
 * fill in space in the DURATION column.
83  */
84 enum {
85 	DURATION_FILL_FULL  = -1,
86 	DURATION_FILL_START = -2,
87 	DURATION_FILL_END   = -3,
88 };
89 
90 static enum print_line_t
91 print_graph_duration(unsigned long long duration, struct trace_seq *s,
92 		     u32 flags);
93 
/* Add a function return address to the trace stack on thread info. */
95 int
96 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
97 			 unsigned long frame_pointer)
98 {
99 	unsigned long long calltime;
100 	int index;
101 
102 	if (!current->ret_stack)
103 		return -EBUSY;
104 
105 	/*
106 	 * We must make sure the ret_stack is tested before we read
107 	 * anything else.
108 	 */
109 	smp_rmb();
110 
111 	/* The return trace stack is full */
112 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
113 		atomic_inc(&current->trace_overrun);
114 		return -EBUSY;
115 	}
116 
117 	calltime = trace_clock_local();
118 
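	/*
	 * Reserve the slot before filling it: the barrier() keeps the
	 * compiler from moving the stores below above the update of
	 * curr_ret_stack, so an interrupt arriving in between pushes
	 * onto the next slot instead of clobbering this one.
	 */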
119 	index = ++current->curr_ret_stack;
120 	barrier();
121 	current->ret_stack[index].ret = ret;
122 	current->ret_stack[index].func = func;
123 	current->ret_stack[index].calltime = calltime;
124 	current->ret_stack[index].subtime = 0;
125 	current->ret_stack[index].fp = frame_pointer;
126 	*depth = index;
127 
128 	return 0;
129 }
130 
/* Retrieve a function return address from the trace stack on thread info. */
132 static void
133 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
134 			unsigned long frame_pointer)
135 {
136 	int index;
137 
138 	index = current->curr_ret_stack;
139 
140 	if (unlikely(index < 0)) {
141 		ftrace_graph_stop();
142 		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
144 		*ret = (unsigned long)panic;
145 		return;
146 	}
147 
148 #if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
149 	/*
150 	 * The arch may choose to record the frame pointer used
151 	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
153 	 * address in the frame pointer, and does a copy instead, then
154 	 * the function graph trace will fail. This test detects this
155 	 * case.
156 	 *
	 * Currently, x86_32 built with -Os (optimize for size) makes the
	 * latest gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
162 	 */
163 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
164 		ftrace_graph_stop();
165 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
166 		     "  from func %ps return to %lx\n",
167 		     current->ret_stack[index].fp,
168 		     frame_pointer,
169 		     (void *)current->ret_stack[index].func,
170 		     current->ret_stack[index].ret);
171 		*ret = (unsigned long)panic;
172 		return;
173 	}
174 #endif
175 
176 	*ret = current->ret_stack[index].ret;
177 	trace->func = current->ret_stack[index].func;
178 	trace->calltime = current->ret_stack[index].calltime;
179 	trace->overrun = atomic_read(&current->trace_overrun);
180 	trace->depth = index;
181 }
182 
183 /*
184  * Send the trace to the ring-buffer.
185  * @return the original return address.
186  */
187 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
188 {
189 	struct ftrace_graph_ret trace;
190 	unsigned long ret;
191 
192 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
193 	trace.rettime = trace_clock_local();
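	/*
	 * barrier() makes sure the slot contents have been copied out
	 * above before curr_ret_stack is decremented and the slot can
	 * be reused by an incoming interrupt.
	 */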
194 	barrier();
195 	current->curr_ret_stack--;
196 
197 	/*
198 	 * The trace should run after decrementing the ret counter
199 	 * in case an interrupt were to come in. We don't want to
200 	 * lose the interrupt if max_depth is set.
201 	 */
202 	ftrace_graph_return(&trace);
203 
204 	if (unlikely(!ret)) {
205 		ftrace_graph_stop();
206 		WARN_ON(1);
207 		/* Might as well panic. What else to do? */
208 		ret = (unsigned long)panic;
209 	}
210 
211 	return ret;
212 }
213 
214 int __trace_graph_entry(struct trace_array *tr,
215 				struct ftrace_graph_ent *trace,
216 				unsigned long flags,
217 				int pc)
218 {
219 	struct ftrace_event_call *call = &event_funcgraph_entry;
220 	struct ring_buffer_event *event;
221 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
222 	struct ftrace_graph_ent_entry *entry;
223 
224 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
225 		return 0;
226 
227 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
228 					  sizeof(*entry), flags, pc);
229 	if (!event)
230 		return 0;
231 	entry	= ring_buffer_event_data(event);
232 	entry->graph_ent			= *trace;
233 	if (!filter_current_check_discard(buffer, call, entry, event))
234 		__buffer_unlock_commit(buffer, event);
235 
236 	return 1;
237 }
238 
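/* Should functions hit in hard irq context be dropped? (funcgraph-irqs) */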
239 static inline int ftrace_graph_ignore_irqs(void)
240 {
241 	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
242 		return 0;
243 
244 	return in_irq();
245 }
246 
247 int trace_graph_entry(struct ftrace_graph_ent *trace)
248 {
249 	struct trace_array *tr = graph_array;
250 	struct trace_array_cpu *data;
251 	unsigned long flags;
252 	long disabled;
253 	int ret;
254 	int cpu;
255 	int pc;
256 
257 	if (!ftrace_trace_task(current))
258 		return 0;
259 
	/* Trace it when it is nested in, or is itself, an enabled function. */
261 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
262 	     ftrace_graph_ignore_irqs()) ||
263 	    (max_depth && trace->depth >= max_depth))
264 		return 0;
265 
266 	local_irq_save(flags);
267 	cpu = raw_smp_processor_id();
268 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
269 	disabled = atomic_inc_return(&data->disabled);
270 	if (likely(disabled == 1)) {
271 		pc = preempt_count();
272 		ret = __trace_graph_entry(tr, trace, flags, pc);
273 	} else {
274 		ret = 0;
275 	}
276 
277 	atomic_dec(&data->disabled);
278 	local_irq_restore(flags);
279 
280 	return ret;
281 }
282 
283 int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
284 {
285 	if (tracing_thresh)
286 		return 1;
287 	else
288 		return trace_graph_entry(trace);
289 }
290 
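/*
 * Record a single function as a zero-duration entry/return pair in
 * the graph output.
 */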
291 static void
292 __trace_graph_function(struct trace_array *tr,
293 		unsigned long ip, unsigned long flags, int pc)
294 {
295 	u64 time = trace_clock_local();
296 	struct ftrace_graph_ent ent = {
297 		.func  = ip,
298 		.depth = 0,
299 	};
300 	struct ftrace_graph_ret ret = {
301 		.func     = ip,
302 		.depth    = 0,
303 		.calltime = time,
304 		.rettime  = time,
305 	};
306 
307 	__trace_graph_entry(tr, &ent, flags, pc);
308 	__trace_graph_return(tr, &ret, flags, pc);
309 }
310 
311 void
312 trace_graph_function(struct trace_array *tr,
313 		unsigned long ip, unsigned long parent_ip,
314 		unsigned long flags, int pc)
315 {
316 	__trace_graph_function(tr, ip, flags, pc);
317 }
318 
319 void __trace_graph_return(struct trace_array *tr,
320 				struct ftrace_graph_ret *trace,
321 				unsigned long flags,
322 				int pc)
323 {
324 	struct ftrace_event_call *call = &event_funcgraph_exit;
325 	struct ring_buffer_event *event;
326 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
327 	struct ftrace_graph_ret_entry *entry;
328 
329 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
330 		return;
331 
332 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
333 					  sizeof(*entry), flags, pc);
334 	if (!event)
335 		return;
336 	entry	= ring_buffer_event_data(event);
337 	entry->ret				= *trace;
338 	if (!filter_current_check_discard(buffer, call, entry, event))
339 		__buffer_unlock_commit(buffer, event);
340 }
341 
342 void trace_graph_return(struct ftrace_graph_ret *trace)
343 {
344 	struct trace_array *tr = graph_array;
345 	struct trace_array_cpu *data;
346 	unsigned long flags;
347 	long disabled;
348 	int cpu;
349 	int pc;
350 
351 	local_irq_save(flags);
352 	cpu = raw_smp_processor_id();
353 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
354 	disabled = atomic_inc_return(&data->disabled);
355 	if (likely(disabled == 1)) {
356 		pc = preempt_count();
357 		__trace_graph_return(tr, trace, flags, pc);
358 	}
359 	atomic_dec(&data->disabled);
360 	local_irq_restore(flags);
361 }
362 
363 void set_graph_array(struct trace_array *tr)
364 {
365 	graph_array = tr;
366 
367 	/* Make graph_array visible before we start tracing */
368 
369 	smp_mb();
370 }
371 
372 void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
373 {
374 	if (tracing_thresh &&
375 	    (trace->rettime - trace->calltime < tracing_thresh))
376 		return;
377 	else
378 		trace_graph_return(trace);
379 }
380 
381 static int graph_trace_init(struct trace_array *tr)
382 {
383 	int ret;
384 
385 	set_graph_array(tr);
386 	if (tracing_thresh)
387 		ret = register_ftrace_graph(&trace_graph_thresh_return,
388 					    &trace_graph_thresh_entry);
389 	else
390 		ret = register_ftrace_graph(&trace_graph_return,
391 					    &trace_graph_entry);
392 	if (ret)
393 		return ret;
394 	tracing_start_cmdline_record();
395 
396 	return 0;
397 }
398 
399 static void graph_trace_reset(struct trace_array *tr)
400 {
401 	tracing_stop_cmdline_record();
402 	unregister_ftrace_graph();
403 }
404 
405 static int max_bytes_for_cpu;
406 
407 static enum print_line_t
408 print_graph_cpu(struct trace_seq *s, int cpu)
409 {
410 	int ret;
411 
412 	/*
413 	 * Start with a space character - to make it stand out
414 	 * to the right a bit when trace output is pasted into
415 	 * email:
416 	 */
417 	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
418 	if (!ret)
419 		return TRACE_TYPE_PARTIAL_LINE;
420 
421 	return TRACE_TYPE_HANDLED;
422 }
423 
424 #define TRACE_GRAPH_PROCINFO_LENGTH	14
425 
426 static enum print_line_t
427 print_graph_proc(struct trace_seq *s, pid_t pid)
428 {
429 	char comm[TASK_COMM_LEN];
430 	/* sign + log10(MAX_INT) + '\0' */
431 	char pid_str[11];
432 	int spaces = 0;
433 	int ret;
434 	int len;
435 	int i;
436 
437 	trace_find_cmdline(pid, comm);
438 	comm[7] = '\0';
439 	sprintf(pid_str, "%d", pid);
440 
441 	/* 1 stands for the "-" character */
442 	len = strlen(comm) + strlen(pid_str) + 1;
443 
444 	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
445 		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
446 
447 	/* First spaces to align center */
448 	for (i = 0; i < spaces / 2; i++) {
449 		ret = trace_seq_printf(s, " ");
450 		if (!ret)
451 			return TRACE_TYPE_PARTIAL_LINE;
452 	}
453 
454 	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
455 	if (!ret)
456 		return TRACE_TYPE_PARTIAL_LINE;
457 
458 	/* Last spaces to align center */
459 	for (i = 0; i < spaces - (spaces / 2); i++) {
460 		ret = trace_seq_printf(s, " ");
461 		if (!ret)
462 			return TRACE_TYPE_PARTIAL_LINE;
463 	}
464 	return TRACE_TYPE_HANDLED;
465 }
466 
467 
468 static enum print_line_t
469 print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
470 {
471 	if (!trace_seq_putc(s, ' '))
472 		return 0;
473 
474 	return trace_print_lat_fmt(s, entry);
475 }
476 
477 /* If the pid changed since the last trace, output this event */
478 static enum print_line_t
479 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
480 {
481 	pid_t prev_pid;
482 	pid_t *last_pid;
483 	int ret;
484 
485 	if (!data)
486 		return TRACE_TYPE_HANDLED;
487 
488 	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
489 
490 	if (*last_pid == pid)
491 		return TRACE_TYPE_HANDLED;
492 
493 	prev_pid = *last_pid;
494 	*last_pid = pid;
495 
496 	if (prev_pid == -1)
497 		return TRACE_TYPE_HANDLED;
498 /*
499  * Context-switch trace line:
500 
501  ------------------------------------------
502  | 1)  migration/0--1  =>  sshd-1755
503  ------------------------------------------
504 
505  */
506 	ret = trace_seq_printf(s,
507 		" ------------------------------------------\n");
508 	if (!ret)
509 		return TRACE_TYPE_PARTIAL_LINE;
510 
511 	ret = print_graph_cpu(s, cpu);
512 	if (ret == TRACE_TYPE_PARTIAL_LINE)
513 		return TRACE_TYPE_PARTIAL_LINE;
514 
515 	ret = print_graph_proc(s, prev_pid);
516 	if (ret == TRACE_TYPE_PARTIAL_LINE)
517 		return TRACE_TYPE_PARTIAL_LINE;
518 
519 	ret = trace_seq_printf(s, " => ");
520 	if (!ret)
521 		return TRACE_TYPE_PARTIAL_LINE;
522 
523 	ret = print_graph_proc(s, pid);
524 	if (ret == TRACE_TYPE_PARTIAL_LINE)
525 		return TRACE_TYPE_PARTIAL_LINE;
526 
527 	ret = trace_seq_printf(s,
528 		"\n ------------------------------------------\n\n");
529 	if (!ret)
530 		return TRACE_TYPE_PARTIAL_LINE;
531 
532 	return TRACE_TYPE_HANDLED;
533 }
534 
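/*
 * Look ahead at the next event: if it is the matching GRAPH_RET for
 * @curr (same pid, same function), return it so the call can be
 * printed as a single-line leaf; otherwise return NULL.
 */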
535 static struct ftrace_graph_ret_entry *
536 get_return_for_leaf(struct trace_iterator *iter,
537 		struct ftrace_graph_ent_entry *curr)
538 {
539 	struct fgraph_data *data = iter->private;
540 	struct ring_buffer_iter *ring_iter = NULL;
541 	struct ring_buffer_event *event;
542 	struct ftrace_graph_ret_entry *next;
543 
544 	/*
545 	 * If the previous output failed to write to the seq buffer,
546 	 * then we just reuse the data from before.
547 	 */
548 	if (data && data->failed) {
549 		curr = &data->ent;
550 		next = &data->ret;
551 	} else {
552 
553 		ring_iter = trace_buffer_iter(iter, iter->cpu);
554 
555 		/* First peek to compare current entry and the next one */
556 		if (ring_iter)
557 			event = ring_buffer_iter_peek(ring_iter, NULL);
558 		else {
559 			/*
560 			 * We need to consume the current entry to see
561 			 * the next one.
562 			 */
563 			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
564 					    NULL, NULL);
565 			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
566 						 NULL, NULL);
567 		}
568 
569 		if (!event)
570 			return NULL;
571 
572 		next = ring_buffer_event_data(event);
573 
574 		if (data) {
575 			/*
576 			 * Save current and next entries for later reference
577 			 * if the output fails.
578 			 */
579 			data->ent = *curr;
580 			/*
			 * If the next event is a return type, we can safely
			 * copy the entire event. Otherwise we only care
			 * about what type it is.
584 			 */
585 			if (next->ent.type == TRACE_GRAPH_RET)
586 				data->ret = *next;
587 			else
588 				data->ret.ent.type = next->ent.type;
589 		}
590 	}
591 
592 	if (next->ent.type != TRACE_GRAPH_RET)
593 		return NULL;
594 
595 	if (curr->ent.pid != next->ent.pid ||
596 			curr->graph_ent.func != next->ret.func)
597 		return NULL;
598 
599 	/* this is a leaf, now advance the iterator */
600 	if (ring_iter)
601 		ring_buffer_read(ring_iter, NULL);
602 
603 	return next;
604 }
605 
606 static int print_graph_abs_time(u64 t, struct trace_seq *s)
607 {
608 	unsigned long usecs_rem;
609 
610 	usecs_rem = do_div(t, NSEC_PER_SEC);
611 	usecs_rem /= 1000;
612 
613 	return trace_seq_printf(s, "%5lu.%06lu |  ",
614 			(unsigned long)t, usecs_rem);
615 }
616 
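/*
 * Print the "==========>" / "<==========" markers when an address
 * falls inside the irq entry text section, i.e. when we enter or
 * leave irq handling code.
 */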
617 static enum print_line_t
618 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
619 		enum trace_type type, int cpu, pid_t pid, u32 flags)
620 {
621 	int ret;
622 	struct trace_seq *s = &iter->seq;
623 
624 	if (addr < (unsigned long)__irqentry_text_start ||
625 		addr >= (unsigned long)__irqentry_text_end)
626 		return TRACE_TYPE_UNHANDLED;
627 
628 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
629 		/* Absolute time */
630 		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
631 			ret = print_graph_abs_time(iter->ts, s);
632 			if (!ret)
633 				return TRACE_TYPE_PARTIAL_LINE;
634 		}
635 
636 		/* Cpu */
637 		if (flags & TRACE_GRAPH_PRINT_CPU) {
638 			ret = print_graph_cpu(s, cpu);
639 			if (ret == TRACE_TYPE_PARTIAL_LINE)
640 				return TRACE_TYPE_PARTIAL_LINE;
641 		}
642 
643 		/* Proc */
644 		if (flags & TRACE_GRAPH_PRINT_PROC) {
645 			ret = print_graph_proc(s, pid);
646 			if (ret == TRACE_TYPE_PARTIAL_LINE)
647 				return TRACE_TYPE_PARTIAL_LINE;
648 			ret = trace_seq_printf(s, " | ");
649 			if (!ret)
650 				return TRACE_TYPE_PARTIAL_LINE;
651 		}
652 	}
653 
654 	/* No overhead */
655 	ret = print_graph_duration(DURATION_FILL_START, s, flags);
656 	if (ret != TRACE_TYPE_HANDLED)
657 		return ret;
658 
659 	if (type == TRACE_GRAPH_ENT)
660 		ret = trace_seq_printf(s, "==========>");
661 	else
662 		ret = trace_seq_printf(s, "<==========");
663 
664 	if (!ret)
665 		return TRACE_TYPE_PARTIAL_LINE;
666 
667 	ret = print_graph_duration(DURATION_FILL_END, s, flags);
668 	if (ret != TRACE_TYPE_HANDLED)
669 		return ret;
670 
671 	ret = trace_seq_printf(s, "\n");
672 
673 	if (!ret)
674 		return TRACE_TYPE_PARTIAL_LINE;
675 	return TRACE_TYPE_HANDLED;
676 }
677 
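/*
 * Format a nanosecond duration as "<usecs>.<nsecs> us", padded to the
 * fixed width of the DURATION column.
 */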
678 enum print_line_t
679 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
680 {
681 	unsigned long nsecs_rem = do_div(duration, 1000);
682 	/* log10(ULONG_MAX) + '\0' */
683 	char msecs_str[21];
684 	char nsecs_str[5];
685 	int ret, len;
686 	int i;
687 
688 	sprintf(msecs_str, "%lu", (unsigned long) duration);
689 
690 	/* Print msecs */
691 	ret = trace_seq_printf(s, "%s", msecs_str);
692 	if (!ret)
693 		return TRACE_TYPE_PARTIAL_LINE;
694 
695 	len = strlen(msecs_str);
696 
	/* Print nsecs (we don't want to exceed 7 digits) */
698 	if (len < 7) {
699 		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
700 
701 		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
702 		ret = trace_seq_printf(s, ".%s", nsecs_str);
703 		if (!ret)
704 			return TRACE_TYPE_PARTIAL_LINE;
705 		len += strlen(nsecs_str);
706 	}
707 
708 	ret = trace_seq_printf(s, " us ");
709 	if (!ret)
710 		return TRACE_TYPE_PARTIAL_LINE;
711 
712 	/* Print remaining spaces to fit the row's width */
713 	for (i = len; i < 7; i++) {
714 		ret = trace_seq_printf(s, " ");
715 		if (!ret)
716 			return TRACE_TYPE_PARTIAL_LINE;
717 	}
718 	return TRACE_TYPE_HANDLED;
719 }
720 
721 static enum print_line_t
722 print_graph_duration(unsigned long long duration, struct trace_seq *s,
723 		     u32 flags)
724 {
725 	int ret = -1;
726 
727 	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
728 	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
729 			return TRACE_TYPE_HANDLED;
730 
	/* No real data, just filling the column with spaces */
732 	switch (duration) {
733 	case DURATION_FILL_FULL:
734 		ret = trace_seq_printf(s, "              |  ");
735 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
736 	case DURATION_FILL_START:
737 		ret = trace_seq_printf(s, "  ");
738 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
739 	case DURATION_FILL_END:
740 		ret = trace_seq_printf(s, " |");
741 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
742 	}
743 
	/* Signal an execution time overhead to the output */
745 	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
746 		/* Duration exceeded 100 msecs */
747 		if (duration > 100000ULL)
748 			ret = trace_seq_printf(s, "! ");
749 		/* Duration exceeded 10 msecs */
750 		else if (duration > 10000ULL)
751 			ret = trace_seq_printf(s, "+ ");
752 	}
753 
754 	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
758 	 */
759 	if (ret == -1)
760 		ret = trace_seq_printf(s, "  ");
761 
	/* Catch any failure that happened above */
763 	if (!ret)
764 		return TRACE_TYPE_PARTIAL_LINE;
765 
766 	ret = trace_print_graph_duration(duration, s);
767 	if (ret != TRACE_TYPE_HANDLED)
768 		return ret;
769 
770 	ret = trace_seq_printf(s, "|  ");
771 	if (!ret)
772 		return TRACE_TYPE_PARTIAL_LINE;
773 
774 	return TRACE_TYPE_HANDLED;
775 }
776 
777 /* Case of a leaf function on its call entry */
778 static enum print_line_t
779 print_graph_entry_leaf(struct trace_iterator *iter,
780 		struct ftrace_graph_ent_entry *entry,
781 		struct ftrace_graph_ret_entry *ret_entry,
782 		struct trace_seq *s, u32 flags)
783 {
784 	struct fgraph_data *data = iter->private;
785 	struct ftrace_graph_ret *graph_ret;
786 	struct ftrace_graph_ent *call;
787 	unsigned long long duration;
788 	int ret;
789 	int i;
790 
791 	graph_ret = &ret_entry->ret;
792 	call = &entry->graph_ent;
793 	duration = graph_ret->rettime - graph_ret->calltime;
794 
795 	if (data) {
796 		struct fgraph_cpu_data *cpu_data;
797 		int cpu = iter->cpu;
798 
799 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
800 
801 		/*
802 		 * Comments display at + 1 to depth. Since
803 		 * this is a leaf function, keep the comments
804 		 * equal to this depth.
805 		 */
806 		cpu_data->depth = call->depth - 1;
807 
808 		/* No need to keep this function around for this depth */
809 		if (call->depth < FTRACE_RETFUNC_DEPTH)
810 			cpu_data->enter_funcs[call->depth] = 0;
811 	}
812 
813 	/* Overhead and duration */
814 	ret = print_graph_duration(duration, s, flags);
815 	if (ret == TRACE_TYPE_PARTIAL_LINE)
816 		return TRACE_TYPE_PARTIAL_LINE;
817 
818 	/* Function */
819 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
820 		ret = trace_seq_printf(s, " ");
821 		if (!ret)
822 			return TRACE_TYPE_PARTIAL_LINE;
823 	}
824 
825 	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
826 	if (!ret)
827 		return TRACE_TYPE_PARTIAL_LINE;
828 
829 	return TRACE_TYPE_HANDLED;
830 }
831 
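/*
 * Case of a nested (non-leaf) call: print "func() {" and remember the
 * depth and function so the matching return can be checked later.
 */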
832 static enum print_line_t
833 print_graph_entry_nested(struct trace_iterator *iter,
834 			 struct ftrace_graph_ent_entry *entry,
835 			 struct trace_seq *s, int cpu, u32 flags)
836 {
837 	struct ftrace_graph_ent *call = &entry->graph_ent;
838 	struct fgraph_data *data = iter->private;
839 	int ret;
840 	int i;
841 
842 	if (data) {
843 		struct fgraph_cpu_data *cpu_data;
844 		int cpu = iter->cpu;
845 
846 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
847 		cpu_data->depth = call->depth;
848 
849 		/* Save this function pointer to see if the exit matches */
850 		if (call->depth < FTRACE_RETFUNC_DEPTH)
851 			cpu_data->enter_funcs[call->depth] = call->func;
852 	}
853 
854 	/* No time */
855 	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
856 	if (ret != TRACE_TYPE_HANDLED)
857 		return ret;
858 
859 	/* Function */
860 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
861 		ret = trace_seq_printf(s, " ");
862 		if (!ret)
863 			return TRACE_TYPE_PARTIAL_LINE;
864 	}
865 
866 	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
867 	if (!ret)
868 		return TRACE_TYPE_PARTIAL_LINE;
869 
870 	/*
871 	 * we already consumed the current entry to check the next one
872 	 * and see if this is a leaf.
873 	 */
874 	return TRACE_TYPE_NO_CONSUME;
875 }
876 
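/*
 * Print everything that precedes the function on a line: the pid
 * change box, irq markers, and the absolute time, CPU, proc and
 * latency columns, depending on @flags.
 */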
877 static enum print_line_t
878 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
879 		     int type, unsigned long addr, u32 flags)
880 {
881 	struct fgraph_data *data = iter->private;
882 	struct trace_entry *ent = iter->ent;
883 	int cpu = iter->cpu;
884 	int ret;
885 
886 	/* Pid */
887 	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
888 		return TRACE_TYPE_PARTIAL_LINE;
889 
890 	if (type) {
891 		/* Interrupt */
892 		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
893 		if (ret == TRACE_TYPE_PARTIAL_LINE)
894 			return TRACE_TYPE_PARTIAL_LINE;
895 	}
896 
897 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
898 		return 0;
899 
900 	/* Absolute time */
901 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
902 		ret = print_graph_abs_time(iter->ts, s);
903 		if (!ret)
904 			return TRACE_TYPE_PARTIAL_LINE;
905 	}
906 
907 	/* Cpu */
908 	if (flags & TRACE_GRAPH_PRINT_CPU) {
909 		ret = print_graph_cpu(s, cpu);
910 		if (ret == TRACE_TYPE_PARTIAL_LINE)
911 			return TRACE_TYPE_PARTIAL_LINE;
912 	}
913 
914 	/* Proc */
915 	if (flags & TRACE_GRAPH_PRINT_PROC) {
916 		ret = print_graph_proc(s, ent->pid);
917 		if (ret == TRACE_TYPE_PARTIAL_LINE)
918 			return TRACE_TYPE_PARTIAL_LINE;
919 
920 		ret = trace_seq_printf(s, " | ");
921 		if (!ret)
922 			return TRACE_TYPE_PARTIAL_LINE;
923 	}
924 
925 	/* Latency format */
926 	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
927 		ret = print_graph_lat_fmt(s, ent);
928 		if (ret == TRACE_TYPE_PARTIAL_LINE)
929 			return TRACE_TYPE_PARTIAL_LINE;
930 	}
931 
932 	return 0;
933 }
934 
935 /*
936  * Entry check for irq code
937  *
938  * returns 1 if
939  *  - we are inside irq code
940  *  - we just entered irq code
941  *
 * returns 0 if
 *  - the funcgraph-irqs option is set
944  *  - we are not inside irq code
945  */
946 static int
947 check_irq_entry(struct trace_iterator *iter, u32 flags,
948 		unsigned long addr, int depth)
949 {
950 	int cpu = iter->cpu;
951 	int *depth_irq;
952 	struct fgraph_data *data = iter->private;
953 
954 	/*
955 	 * If we are either displaying irqs, or we got called as
956 	 * a graph event and private data does not exist,
957 	 * then we bypass the irq check.
958 	 */
959 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
960 	    (!data))
961 		return 0;
962 
963 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
964 
965 	/*
966 	 * We are inside the irq code
967 	 */
968 	if (*depth_irq >= 0)
969 		return 1;
970 
971 	if ((addr < (unsigned long)__irqentry_text_start) ||
972 	    (addr >= (unsigned long)__irqentry_text_end))
973 		return 0;
974 
975 	/*
976 	 * We are entering irq code.
977 	 */
978 	*depth_irq = depth;
979 	return 1;
980 }
981 
982 /*
983  * Return check for irq code
984  *
985  * returns 1 if
986  *  - we are inside irq code
987  *  - we just left irq code
988  *
989  * returns 0 if
 *  - the funcgraph-irqs option is set
991  *  - we are not inside irq code
992  */
993 static int
994 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
995 {
996 	int cpu = iter->cpu;
997 	int *depth_irq;
998 	struct fgraph_data *data = iter->private;
999 
1000 	/*
1001 	 * If we are either displaying irqs, or we got called as
1002 	 * a graph event and private data does not exist,
1003 	 * then we bypass the irq check.
1004 	 */
1005 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1006 	    (!data))
1007 		return 0;
1008 
1009 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1010 
1011 	/*
1012 	 * We are not inside the irq code.
1013 	 */
1014 	if (*depth_irq == -1)
1015 		return 0;
1016 
1017 	/*
	 * We are inside the irq code, and this return brings us back
	 * to (or above) the depth at which the irq code was entered.
	 * Don't trace it, and clear the saved entry depth, since we
	 * are now out of the irq code.
	 *
	 * Using '>=' rather than '==' ensures that we still 'leave the
	 * irq code' even if the matching RETURN entry was lost.
1025 	 */
1026 	if (*depth_irq >= depth) {
1027 		*depth_irq = -1;
1028 		return 1;
1029 	}
1030 
1031 	/*
1032 	 * We are inside the irq code, and this is not the entry.
1033 	 */
1034 	return 1;
1035 }
1036 
1037 static enum print_line_t
1038 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1039 			struct trace_iterator *iter, u32 flags)
1040 {
1041 	struct fgraph_data *data = iter->private;
1042 	struct ftrace_graph_ent *call = &field->graph_ent;
1043 	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
1045 	int cpu = iter->cpu;
1046 
1047 	if (check_irq_entry(iter, flags, call->func, call->depth))
1048 		return TRACE_TYPE_HANDLED;
1049 
1050 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
1051 		return TRACE_TYPE_PARTIAL_LINE;
1052 
1053 	leaf_ret = get_return_for_leaf(iter, field);
1054 	if (leaf_ret)
1055 		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1056 	else
1057 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1058 
1059 	if (data) {
1060 		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
1063 		 */
1064 		if (s->full) {
1065 			data->failed = 1;
1066 			data->cpu = cpu;
1067 		} else
1068 			data->failed = 0;
1069 	}
1070 
1071 	return ret;
1072 }
1073 
1074 static enum print_line_t
1075 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1076 		   struct trace_entry *ent, struct trace_iterator *iter,
1077 		   u32 flags)
1078 {
1079 	unsigned long long duration = trace->rettime - trace->calltime;
1080 	struct fgraph_data *data = iter->private;
1081 	pid_t pid = ent->pid;
1082 	int cpu = iter->cpu;
1083 	int func_match = 1;
1084 	int ret;
1085 	int i;
1086 
1087 	if (check_irq_return(iter, flags, trace->depth))
1088 		return TRACE_TYPE_HANDLED;
1089 
1090 	if (data) {
1091 		struct fgraph_cpu_data *cpu_data;
1092 		int cpu = iter->cpu;
1093 
1094 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1095 
1096 		/*
		 * Comments display at + 1 to depth. Since this is the
		 * return from a function, we now want the comments
		 * to display at the same level as the closing bracket.
1100 		 */
1101 		cpu_data->depth = trace->depth - 1;
1102 
1103 		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1104 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1105 				func_match = 0;
1106 			cpu_data->enter_funcs[trace->depth] = 0;
1107 		}
1108 	}
1109 
1110 	if (print_graph_prologue(iter, s, 0, 0, flags))
1111 		return TRACE_TYPE_PARTIAL_LINE;
1112 
1113 	/* Overhead and duration */
1114 	ret = print_graph_duration(duration, s, flags);
1115 	if (ret == TRACE_TYPE_PARTIAL_LINE)
1116 		return TRACE_TYPE_PARTIAL_LINE;
1117 
1118 	/* Closing brace */
1119 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1120 		ret = trace_seq_printf(s, " ");
1121 		if (!ret)
1122 			return TRACE_TYPE_PARTIAL_LINE;
1123 	}
1124 
1125 	/*
1126 	 * If the return function does not have a matching entry,
1127 	 * then the entry was lost. Instead of just printing
1128 	 * the '}' and letting the user guess what function this
1129 	 * belongs to, write out the function name.
1130 	 */
1131 	if (func_match) {
1132 		ret = trace_seq_printf(s, "}\n");
1133 		if (!ret)
1134 			return TRACE_TYPE_PARTIAL_LINE;
1135 	} else {
1136 		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1137 		if (!ret)
1138 			return TRACE_TYPE_PARTIAL_LINE;
1139 	}
1140 
1141 	/* Overrun */
1142 	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
1143 		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
1144 					trace->overrun);
1145 		if (!ret)
1146 			return TRACE_TYPE_PARTIAL_LINE;
1147 	}
1148 
1149 	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1150 			      cpu, pid, flags);
1151 	if (ret == TRACE_TYPE_PARTIAL_LINE)
1152 		return TRACE_TYPE_PARTIAL_LINE;
1153 
1154 	return TRACE_TYPE_HANDLED;
1155 }
1156 
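/*
 * Render a non-graph event (trace_printk() and friends) as an
 * indented C-style comment inside the graph output.
 */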
1157 static enum print_line_t
1158 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1159 		    struct trace_iterator *iter, u32 flags)
1160 {
1161 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1162 	struct fgraph_data *data = iter->private;
1163 	struct trace_event *event;
1164 	int depth = 0;
1165 	int ret;
1166 	int i;
1167 
1168 	if (data)
1169 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1170 
1171 	if (print_graph_prologue(iter, s, 0, 0, flags))
1172 		return TRACE_TYPE_PARTIAL_LINE;
1173 
1174 	/* No time */
1175 	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
1176 	if (ret != TRACE_TYPE_HANDLED)
1177 		return ret;
1178 
1179 	/* Indentation */
1180 	if (depth > 0)
1181 		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1182 			ret = trace_seq_printf(s, " ");
1183 			if (!ret)
1184 				return TRACE_TYPE_PARTIAL_LINE;
1185 		}
1186 
1187 	/* The comment */
1188 	ret = trace_seq_printf(s, "/* ");
1189 	if (!ret)
1190 		return TRACE_TYPE_PARTIAL_LINE;
1191 
1192 	switch (iter->ent->type) {
1193 	case TRACE_BPRINT:
1194 		ret = trace_print_bprintk_msg_only(iter);
1195 		if (ret != TRACE_TYPE_HANDLED)
1196 			return ret;
1197 		break;
1198 	case TRACE_PRINT:
1199 		ret = trace_print_printk_msg_only(iter);
1200 		if (ret != TRACE_TYPE_HANDLED)
1201 			return ret;
1202 		break;
1203 	default:
1204 		event = ftrace_find_event(ent->type);
1205 		if (!event)
1206 			return TRACE_TYPE_UNHANDLED;
1207 
1208 		ret = event->funcs->trace(iter, sym_flags, event);
1209 		if (ret != TRACE_TYPE_HANDLED)
1210 			return ret;
1211 	}
1212 
1213 	/* Strip ending newline */
1214 	if (s->buffer[s->len - 1] == '\n') {
1215 		s->buffer[s->len - 1] = '\0';
1216 		s->len--;
1217 	}
1218 
1219 	ret = trace_seq_printf(s, " */\n");
1220 	if (!ret)
1221 		return TRACE_TYPE_PARTIAL_LINE;
1222 
1223 	return TRACE_TYPE_HANDLED;
1224 }
1225 
1226 
1227 enum print_line_t
1228 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1229 {
1230 	struct ftrace_graph_ent_entry *field;
1231 	struct fgraph_data *data = iter->private;
1232 	struct trace_entry *entry = iter->ent;
1233 	struct trace_seq *s = &iter->seq;
1234 	int cpu = iter->cpu;
1235 	int ret;
1236 
1237 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1238 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1239 		return TRACE_TYPE_HANDLED;
1240 	}
1241 
1242 	/*
1243 	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry, which would otherwise never go out.
1245 	 */
1246 	if (data && data->failed) {
1247 		field = &data->ent;
1248 		iter->cpu = data->cpu;
1249 		ret = print_graph_entry(field, s, iter, flags);
1250 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1251 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1252 			ret = TRACE_TYPE_NO_CONSUME;
1253 		}
1254 		iter->cpu = cpu;
1255 		return ret;
1256 	}
1257 
1258 	switch (entry->type) {
1259 	case TRACE_GRAPH_ENT: {
1260 		/*
1261 		 * print_graph_entry() may consume the current event,
1262 		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
1265 		 */
1266 		struct ftrace_graph_ent_entry saved;
1267 		trace_assign_type(field, entry);
1268 		saved = *field;
1269 		return print_graph_entry(&saved, s, iter, flags);
1270 	}
1271 	case TRACE_GRAPH_RET: {
1272 		struct ftrace_graph_ret_entry *field;
1273 		trace_assign_type(field, entry);
1274 		return print_graph_return(&field->ret, s, entry, iter, flags);
1275 	}
1276 	case TRACE_STACK:
1277 	case TRACE_FN:
		/* don't print stack and function entries as comments */
1279 		return TRACE_TYPE_UNHANDLED;
1280 
1281 	default:
1282 		return print_graph_comment(s, entry, iter, flags);
1283 	}
1284 
1285 	return TRACE_TYPE_HANDLED;
1286 }
1287 
1288 static enum print_line_t
1289 print_graph_function(struct trace_iterator *iter)
1290 {
1291 	return print_graph_function_flags(iter, tracer_flags.val);
1292 }
1293 
1294 static enum print_line_t
1295 print_graph_function_event(struct trace_iterator *iter, int flags,
1296 			   struct trace_event *event)
1297 {
1298 	return print_graph_function(iter);
1299 }
1300 
1301 static void print_lat_header(struct seq_file *s, u32 flags)
1302 {
1303 	static const char spaces[] = "                "	/* 16 spaces */
1304 		"    "					/* 4 spaces */
1305 		"                 ";			/* 17 spaces */
1306 	int size = 0;
1307 
1308 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1309 		size += 16;
1310 	if (flags & TRACE_GRAPH_PRINT_CPU)
1311 		size += 4;
1312 	if (flags & TRACE_GRAPH_PRINT_PROC)
1313 		size += 17;
1314 
1315 	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1316 	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1317 	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1318 	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1319 	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1320 }
1321 
1322 static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1323 {
1324 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1325 
1326 	if (lat)
1327 		print_lat_header(s, flags);
1328 
1329 	/* 1st line */
1330 	seq_printf(s, "#");
1331 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1332 		seq_printf(s, "     TIME       ");
1333 	if (flags & TRACE_GRAPH_PRINT_CPU)
1334 		seq_printf(s, " CPU");
1335 	if (flags & TRACE_GRAPH_PRINT_PROC)
1336 		seq_printf(s, "  TASK/PID       ");
1337 	if (lat)
1338 		seq_printf(s, "||||");
1339 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1340 		seq_printf(s, "  DURATION   ");
1341 	seq_printf(s, "               FUNCTION CALLS\n");
1342 
1343 	/* 2nd line */
1344 	seq_printf(s, "#");
1345 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1346 		seq_printf(s, "      |         ");
1347 	if (flags & TRACE_GRAPH_PRINT_CPU)
1348 		seq_printf(s, " |  ");
1349 	if (flags & TRACE_GRAPH_PRINT_PROC)
1350 		seq_printf(s, "   |    |        ");
1351 	if (lat)
1352 		seq_printf(s, "||||");
1353 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1354 		seq_printf(s, "   |   |      ");
1355 	seq_printf(s, "               |   |   |   |\n");
1356 }
1357 
1358 void print_graph_headers(struct seq_file *s)
1359 {
1360 	print_graph_headers_flags(s, tracer_flags.val);
1361 }
1362 
1363 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1364 {
1365 	struct trace_iterator *iter = s->private;
1366 
1367 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
1368 		return;
1369 
1370 	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
1371 		/* print nothing if the buffers are empty */
1372 		if (trace_empty(iter))
1373 			return;
1374 
1375 		print_trace_header(s, iter);
1376 	}
1377 
1378 	__print_graph_headers_flags(s, flags);
1379 }
1380 
1381 void graph_trace_open(struct trace_iterator *iter)
1382 {
1383 	/* pid and depth on the last trace processed */
1384 	struct fgraph_data *data;
1385 	int cpu;
1386 
1387 	iter->private = NULL;
1388 
1389 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1390 	if (!data)
1391 		goto out_err;
1392 
1393 	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1394 	if (!data->cpu_data)
1395 		goto out_err_free;
1396 
1397 	for_each_possible_cpu(cpu) {
1398 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1399 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1400 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1401 		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1402 
1403 		*pid = -1;
1404 		*depth = 0;
1405 		*ignore = 0;
1406 		*depth_irq = -1;
1407 	}
1408 
1409 	iter->private = data;
1410 
1411 	return;
1412 
1413  out_err_free:
1414 	kfree(data);
1415  out_err:
1416 	pr_warning("function graph tracer: not enough memory\n");
1417 }
1418 
1419 void graph_trace_close(struct trace_iterator *iter)
1420 {
1421 	struct fgraph_data *data = iter->private;
1422 
1423 	if (data) {
1424 		free_percpu(data->cpu_data);
1425 		kfree(data);
1426 	}
1427 }
1428 
1429 static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
1430 {
1431 	if (bit == TRACE_GRAPH_PRINT_IRQS)
1432 		ftrace_graph_skip_irqs = !set;
1433 
1434 	return 0;
1435 }
1436 
1437 static struct trace_event_functions graph_functions = {
1438 	.trace		= print_graph_function_event,
1439 };
1440 
1441 static struct trace_event graph_trace_entry_event = {
1442 	.type		= TRACE_GRAPH_ENT,
1443 	.funcs		= &graph_functions,
1444 };
1445 
1446 static struct trace_event graph_trace_ret_event = {
1447 	.type		= TRACE_GRAPH_RET,
1448 	.funcs		= &graph_functions
1449 };
1450 
1451 static struct tracer graph_trace __read_mostly = {
1452 	.name		= "function_graph",
1453 	.open		= graph_trace_open,
1454 	.pipe_open	= graph_trace_open,
1455 	.close		= graph_trace_close,
1456 	.pipe_close	= graph_trace_close,
1457 	.wait_pipe	= poll_wait_pipe,
1458 	.init		= graph_trace_init,
1459 	.reset		= graph_trace_reset,
1460 	.print_line	= print_graph_function,
1461 	.print_header	= print_graph_headers,
1462 	.flags		= &tracer_flags,
1463 	.set_flag	= func_graph_set_flag,
1464 #ifdef CONFIG_FTRACE_SELFTEST
1465 	.selftest	= trace_selftest_startup_function_graph,
1466 #endif
1467 };
1468 
1469 
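/*
 * "max_graph_depth" file: 0 means no limit; otherwise only the first
 * max_graph_depth levels of the call graph are traced
 * (see trace_graph_entry()).
 */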
1470 static ssize_t
1471 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1472 		  loff_t *ppos)
1473 {
1474 	unsigned long val;
1475 	int ret;
1476 
1477 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1478 	if (ret)
1479 		return ret;
1480 
1481 	max_depth = val;
1482 
1483 	*ppos += cnt;
1484 
1485 	return cnt;
1486 }
1487 
1488 static ssize_t
1489 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1490 		 loff_t *ppos)
1491 {
1492 	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1493 	int n;
1494 
1495 	n = sprintf(buf, "%d\n", max_depth);
1496 
1497 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1498 }
1499 
1500 static const struct file_operations graph_depth_fops = {
1501 	.open		= tracing_open_generic,
1502 	.write		= graph_depth_write,
1503 	.read		= graph_depth_read,
1504 	.llseek		= generic_file_llseek,
1505 };
1506 
1507 static __init int init_graph_debugfs(void)
1508 {
1509 	struct dentry *d_tracer;
1510 
1511 	d_tracer = tracing_init_dentry();
1512 	if (!d_tracer)
1513 		return 0;
1514 
1515 	trace_create_file("max_graph_depth", 0644, d_tracer,
1516 			  NULL, &graph_depth_fops);
1517 
1518 	return 0;
1519 }
1520 fs_initcall(init_graph_debugfs);
1521 
1522 static __init int init_graph_trace(void)
1523 {
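	/* Width of the largest possible cpu number, for column alignment */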
1524 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1525 
1526 	if (!register_ftrace_event(&graph_trace_entry_event)) {
1527 		pr_warning("Warning: could not register graph trace events\n");
1528 		return 1;
1529 	}
1530 
1531 	if (!register_ftrace_event(&graph_trace_ret_event)) {
1532 		pr_warning("Warning: could not register graph trace events\n");
1533 		return 1;
1534 	}
1535 
1536 	return register_tracer(&graph_trace);
1537 }
1538 
1539 core_initcall(init_graph_trace);
1540