1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 
15 #include "trace.h"
16 #include "trace_output.h"
17 
18 /* When set, irq functions will be ignored */
19 static int ftrace_graph_skip_irqs;
20 
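/*
 * Per-cpu state for the output pass: the pid of the last entry printed,
 * the current nesting depth, the depth at which we entered irq code
 * (or -1), an ignore flag, and the function entered at each depth
 * (used to match a return to its entry in print_graph_return()).
 */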
21 struct fgraph_cpu_data {
22 	pid_t		last_pid;
23 	int		depth;
24 	int		depth_irq;
25 	int		ignore;
26 	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
27 };
28 
29 struct fgraph_data {
30 	struct fgraph_cpu_data __percpu *cpu_data;
31 
32 	/* Place to preserve last processed entry. */
33 	struct ftrace_graph_ent_entry	ent;
34 	struct ftrace_graph_ret_entry	ret;
35 	int				failed;
36 	int				cpu;
37 };
38 
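/* Number of spaces of indentation per call-graph nesting level */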
39 #define TRACE_GRAPH_INDENT	2
40 
41 /* Flag options */
42 #define TRACE_GRAPH_PRINT_OVERRUN	0x1
43 #define TRACE_GRAPH_PRINT_CPU		0x2
44 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
45 #define TRACE_GRAPH_PRINT_PROC		0x8
46 #define TRACE_GRAPH_PRINT_DURATION	0x10
47 #define TRACE_GRAPH_PRINT_ABS_TIME	0x20
48 #define TRACE_GRAPH_PRINT_IRQS		0x40
49 
50 static unsigned int max_depth;
51 
52 static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
59 	/* Display proc name/pid */
60 	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
61 	/* Display duration of execution */
62 	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
63 	/* Display absolute time of an entry */
64 	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
65 	/* Display interrupts */
66 	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
67 	{ } /* Empty entry */
68 };
69 
70 static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or absolute time by default */
72 	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
73 	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
74 	.opts = trace_opts
75 };
76 
77 static struct trace_array *graph_array;
78 
79 /*
80  * DURATION column is being also used to display IRQ signs,
81  * following values are used by print_graph_irq and others
82  * to fill in space into DURATION column.
83  */
84 enum {
85 	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
86 	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
87 	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
88 };
89 
90 static enum print_line_t
91 print_graph_duration(unsigned long long duration, struct trace_seq *s,
92 		     u32 flags);
93 
/* Add a function return address to the trace stack on thread info. */
95 int
96 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
97 			 unsigned long frame_pointer)
98 {
99 	unsigned long long calltime;
100 	int index;
101 
102 	if (!current->ret_stack)
103 		return -EBUSY;
104 
105 	/*
106 	 * We must make sure the ret_stack is tested before we read
107 	 * anything else.
108 	 */
109 	smp_rmb();
110 
111 	/* The return trace stack is full */
112 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
113 		atomic_inc(&current->trace_overrun);
114 		return -EBUSY;
115 	}
116 
117 	/*
118 	 * The curr_ret_stack is an index to ftrace return stack of
119 	 * current task.  Its value should be in [0, FTRACE_RETFUNC_
120 	 * DEPTH) when the function graph tracer is used.  To support
121 	 * filtering out specific functions, it makes the index
122 	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
123 	 * so when it sees a negative index the ftrace will ignore
124 	 * the record.  And the index gets recovered when returning
125 	 * from the filtered function by adding the FTRACE_NOTRACE_
126 	 * DEPTH and then it'll continue to record functions normally.
127 	 *
128 	 * The curr_ret_stack is initialized to -1 and get increased
129 	 * in this function.  So it can be less than -1 only if it was
130 	 * filtered out via ftrace_graph_notrace_addr() which can be
131 	 * set from set_graph_notrace file in debugfs by user.
132 	 */
133 	if (current->curr_ret_stack < -1)
134 		return -EBUSY;
135 
136 	calltime = trace_clock_local();
137 
138 	index = ++current->curr_ret_stack;
139 	if (ftrace_graph_notrace_addr(func))
140 		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
141 	barrier();
142 	current->ret_stack[index].ret = ret;
143 	current->ret_stack[index].func = func;
144 	current->ret_stack[index].calltime = calltime;
145 	current->ret_stack[index].subtime = 0;
146 	current->ret_stack[index].fp = frame_pointer;
147 	*depth = current->curr_ret_stack;
148 
149 	return 0;
150 }
151 
/* Retrieve a function return address from the trace stack on thread info. */
153 static void
154 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
155 			unsigned long frame_pointer)
156 {
157 	int index;
158 
159 	index = current->curr_ret_stack;
160 
161 	/*
162 	 * A negative index here means that it's just returned from a
163 	 * notrace'd function.  Recover index to get an original
164 	 * return address.  See ftrace_push_return_trace().
165 	 *
166 	 * TODO: Need to check whether the stack gets corrupted.
167 	 */
168 	if (index < 0)
169 		index += FTRACE_NOTRACE_DEPTH;
170 
171 	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
172 		ftrace_graph_stop();
173 		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
175 		*ret = (unsigned long)panic;
176 		return;
177 	}
178 
179 #if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
180 	/*
181 	 * The arch may choose to record the frame pointer used
182 	 * and check it here to make sure that it is what we expect it
183 	 * to be. If gcc does not set the place holder of the return
184 	 * address in the frame pointer, and does a copy instead, then
185 	 * the function graph trace will fail. This test detects this
186 	 * case.
187 	 *
188 	 * Currently, x86_32 with optimize for size (-Os) makes the latest
189 	 * gcc do the above.
190 	 *
191 	 * Note, -mfentry does not use frame pointers, and this test
192 	 *  is not needed if CC_USING_FENTRY is set.
193 	 */
194 	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
195 		ftrace_graph_stop();
196 		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
197 		     "  from func %ps return to %lx\n",
198 		     current->ret_stack[index].fp,
199 		     frame_pointer,
200 		     (void *)current->ret_stack[index].func,
201 		     current->ret_stack[index].ret);
202 		*ret = (unsigned long)panic;
203 		return;
204 	}
205 #endif
206 
207 	*ret = current->ret_stack[index].ret;
208 	trace->func = current->ret_stack[index].func;
209 	trace->calltime = current->ret_stack[index].calltime;
210 	trace->overrun = atomic_read(&current->trace_overrun);
211 	trace->depth = index;
212 }
213 
214 /*
215  * Send the trace to the ring-buffer.
216  * @return the original return address.
217  */
218 unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
219 {
220 	struct ftrace_graph_ret trace;
221 	unsigned long ret;
222 
223 	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
224 	trace.rettime = trace_clock_local();
225 	barrier();
226 	current->curr_ret_stack--;
227 	/*
228 	 * The curr_ret_stack can be less than -1 only if it was
229 	 * filtered out and it's about to return from the function.
230 	 * Recover the index and continue to trace normal functions.
231 	 */
232 	if (current->curr_ret_stack < -1) {
233 		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
234 		return ret;
235 	}
236 
237 	/*
238 	 * The trace should run after decrementing the ret counter
239 	 * in case an interrupt were to come in. We don't want to
240 	 * lose the interrupt if max_depth is set.
241 	 */
242 	ftrace_graph_return(&trace);
243 
244 	if (unlikely(!ret)) {
245 		ftrace_graph_stop();
246 		WARN_ON(1);
247 		/* Might as well panic. What else to do? */
248 		ret = (unsigned long)panic;
249 	}
250 
251 	return ret;
252 }
253 
254 int __trace_graph_entry(struct trace_array *tr,
255 				struct ftrace_graph_ent *trace,
256 				unsigned long flags,
257 				int pc)
258 {
259 	struct ftrace_event_call *call = &event_funcgraph_entry;
260 	struct ring_buffer_event *event;
261 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
262 	struct ftrace_graph_ent_entry *entry;
263 
264 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
265 		return 0;
266 
267 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
268 					  sizeof(*entry), flags, pc);
269 	if (!event)
270 		return 0;
271 	entry	= ring_buffer_event_data(event);
272 	entry->graph_ent			= *trace;
273 	if (!call_filter_check_discard(call, entry, buffer, event))
274 		__buffer_unlock_commit(buffer, event);
275 
276 	return 1;
277 }
278 
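/*
 * Return nonzero (ignore the current function) when irq skipping is
 * enabled and we are in hard irq context.
 */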
279 static inline int ftrace_graph_ignore_irqs(void)
280 {
281 	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
282 		return 0;
283 
284 	return in_irq();
285 }
286 
287 int trace_graph_entry(struct ftrace_graph_ent *trace)
288 {
289 	struct trace_array *tr = graph_array;
290 	struct trace_array_cpu *data;
291 	unsigned long flags;
292 	long disabled;
293 	int ret;
294 	int cpu;
295 	int pc;
296 
297 	if (!ftrace_trace_task(current))
298 		return 0;
299 
	/* Trace it when it is nested in an enabled function or is itself enabled. */
301 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
302 	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
303 	    (max_depth && trace->depth >= max_depth))
304 		return 0;
305 
306 	/*
307 	 * Do not trace a function if it's filtered by set_graph_notrace.
308 	 * Make the index of ret stack negative to indicate that it should
309 	 * ignore further functions.  But it needs its own ret stack entry
310 	 * to recover the original index in order to continue tracing after
311 	 * returning from the function.
312 	 */
313 	if (ftrace_graph_notrace_addr(trace->func))
314 		return 1;
315 
316 	local_irq_save(flags);
317 	cpu = raw_smp_processor_id();
318 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
319 	disabled = atomic_inc_return(&data->disabled);
320 	if (likely(disabled == 1)) {
321 		pc = preempt_count();
322 		ret = __trace_graph_entry(tr, trace, flags, pc);
323 	} else {
324 		ret = 0;
325 	}
326 
327 	atomic_dec(&data->disabled);
328 	local_irq_restore(flags);
329 
330 	return ret;
331 }
332 
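/*
 * When tracing_thresh is set, entry events are not recorded at all;
 * only return events whose duration exceeds the threshold are
 * (see trace_graph_thresh_return()).
 */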
333 int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
334 {
335 	if (tracing_thresh)
336 		return 1;
337 	else
338 		return trace_graph_entry(trace);
339 }
340 
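/*
 * Emit a matched entry/return pair with identical timestamps, so that a
 * single function hit renders as a zero-duration leaf in graph output.
 */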
341 static void
342 __trace_graph_function(struct trace_array *tr,
343 		unsigned long ip, unsigned long flags, int pc)
344 {
345 	u64 time = trace_clock_local();
346 	struct ftrace_graph_ent ent = {
347 		.func  = ip,
348 		.depth = 0,
349 	};
350 	struct ftrace_graph_ret ret = {
351 		.func     = ip,
352 		.depth    = 0,
353 		.calltime = time,
354 		.rettime  = time,
355 	};
356 
357 	__trace_graph_entry(tr, &ent, flags, pc);
358 	__trace_graph_return(tr, &ret, flags, pc);
359 }
360 
361 void
362 trace_graph_function(struct trace_array *tr,
363 		unsigned long ip, unsigned long parent_ip,
364 		unsigned long flags, int pc)
365 {
366 	__trace_graph_function(tr, ip, flags, pc);
367 }
368 
369 void __trace_graph_return(struct trace_array *tr,
370 				struct ftrace_graph_ret *trace,
371 				unsigned long flags,
372 				int pc)
373 {
374 	struct ftrace_event_call *call = &event_funcgraph_exit;
375 	struct ring_buffer_event *event;
376 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
377 	struct ftrace_graph_ret_entry *entry;
378 
379 	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
380 		return;
381 
382 	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
383 					  sizeof(*entry), flags, pc);
384 	if (!event)
385 		return;
386 	entry	= ring_buffer_event_data(event);
387 	entry->ret				= *trace;
388 	if (!call_filter_check_discard(call, entry, buffer, event))
389 		__buffer_unlock_commit(buffer, event);
390 }
391 
392 void trace_graph_return(struct ftrace_graph_ret *trace)
393 {
394 	struct trace_array *tr = graph_array;
395 	struct trace_array_cpu *data;
396 	unsigned long flags;
397 	long disabled;
398 	int cpu;
399 	int pc;
400 
401 	local_irq_save(flags);
402 	cpu = raw_smp_processor_id();
403 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
404 	disabled = atomic_inc_return(&data->disabled);
405 	if (likely(disabled == 1)) {
406 		pc = preempt_count();
407 		__trace_graph_return(tr, trace, flags, pc);
408 	}
409 	atomic_dec(&data->disabled);
410 	local_irq_restore(flags);
411 }
412 
413 void set_graph_array(struct trace_array *tr)
414 {
415 	graph_array = tr;
416 
	/* Make graph_array visible before we start tracing */
	smp_mb();
420 }
421 
422 void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
423 {
424 	if (tracing_thresh &&
425 	    (trace->rettime - trace->calltime < tracing_thresh))
426 		return;
427 	else
428 		trace_graph_return(trace);
429 }
430 
431 static int graph_trace_init(struct trace_array *tr)
432 {
433 	int ret;
434 
435 	set_graph_array(tr);
436 	if (tracing_thresh)
437 		ret = register_ftrace_graph(&trace_graph_thresh_return,
438 					    &trace_graph_thresh_entry);
439 	else
440 		ret = register_ftrace_graph(&trace_graph_return,
441 					    &trace_graph_entry);
442 	if (ret)
443 		return ret;
444 	tracing_start_cmdline_record();
445 
446 	return 0;
447 }
448 
449 static void graph_trace_reset(struct trace_array *tr)
450 {
451 	tracing_stop_cmdline_record();
452 	unregister_ftrace_graph();
453 }
454 
455 static int max_bytes_for_cpu;
456 
457 static enum print_line_t
458 print_graph_cpu(struct trace_seq *s, int cpu)
459 {
460 	int ret;
461 
462 	/*
463 	 * Start with a space character - to make it stand out
464 	 * to the right a bit when trace output is pasted into
465 	 * email:
466 	 */
467 	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
468 	if (!ret)
469 		return TRACE_TYPE_PARTIAL_LINE;
470 
471 	return TRACE_TYPE_HANDLED;
472 }
473 
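/* Width of the centered "comm-pid" column printed by print_graph_proc() */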
474 #define TRACE_GRAPH_PROCINFO_LENGTH	14
475 
476 static enum print_line_t
477 print_graph_proc(struct trace_seq *s, pid_t pid)
478 {
479 	char comm[TASK_COMM_LEN];
480 	/* sign + log10(MAX_INT) + '\0' */
481 	char pid_str[11];
482 	int spaces = 0;
483 	int ret;
484 	int len;
485 	int i;
486 
487 	trace_find_cmdline(pid, comm);
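	/* Truncate comm so the "comm-pid" pair fits the column */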
488 	comm[7] = '\0';
489 	sprintf(pid_str, "%d", pid);
490 
491 	/* 1 stands for the "-" character */
492 	len = strlen(comm) + strlen(pid_str) + 1;
493 
494 	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
495 		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
496 
	/* Leading spaces to center the text */
498 	for (i = 0; i < spaces / 2; i++) {
499 		ret = trace_seq_putc(s, ' ');
500 		if (!ret)
501 			return TRACE_TYPE_PARTIAL_LINE;
502 	}
503 
504 	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
505 	if (!ret)
506 		return TRACE_TYPE_PARTIAL_LINE;
507 
	/* Trailing spaces to center the text */
509 	for (i = 0; i < spaces - (spaces / 2); i++) {
510 		ret = trace_seq_putc(s, ' ');
511 		if (!ret)
512 			return TRACE_TYPE_PARTIAL_LINE;
513 	}
514 	return TRACE_TYPE_HANDLED;
515 }
516 
517 
518 static enum print_line_t
519 print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
520 {
521 	if (!trace_seq_putc(s, ' '))
522 		return 0;
523 
524 	return trace_print_lat_fmt(s, entry);
525 }
526 
527 /* If the pid changed since the last trace, output this event */
528 static enum print_line_t
529 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
530 {
531 	pid_t prev_pid;
532 	pid_t *last_pid;
533 	int ret;
534 
535 	if (!data)
536 		return TRACE_TYPE_HANDLED;
537 
538 	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
539 
540 	if (*last_pid == pid)
541 		return TRACE_TYPE_HANDLED;
542 
543 	prev_pid = *last_pid;
544 	*last_pid = pid;
545 
546 	if (prev_pid == -1)
547 		return TRACE_TYPE_HANDLED;
548 /*
549  * Context-switch trace line:
550 
551  ------------------------------------------
552  | 1)  migration/0--1  =>  sshd-1755
553  ------------------------------------------
554 
555  */
556 	ret = trace_seq_puts(s,
557 		" ------------------------------------------\n");
558 	if (!ret)
559 		return TRACE_TYPE_PARTIAL_LINE;
560 
561 	ret = print_graph_cpu(s, cpu);
562 	if (ret == TRACE_TYPE_PARTIAL_LINE)
563 		return TRACE_TYPE_PARTIAL_LINE;
564 
565 	ret = print_graph_proc(s, prev_pid);
566 	if (ret == TRACE_TYPE_PARTIAL_LINE)
567 		return TRACE_TYPE_PARTIAL_LINE;
568 
569 	ret = trace_seq_puts(s, " => ");
570 	if (!ret)
571 		return TRACE_TYPE_PARTIAL_LINE;
572 
573 	ret = print_graph_proc(s, pid);
574 	if (ret == TRACE_TYPE_PARTIAL_LINE)
575 		return TRACE_TYPE_PARTIAL_LINE;
576 
577 	ret = trace_seq_puts(s,
578 		"\n ------------------------------------------\n\n");
579 	if (!ret)
580 		return TRACE_TYPE_PARTIAL_LINE;
581 
582 	return TRACE_TYPE_HANDLED;
583 }
584 
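/*
 * Peek at the next event: if it is the matching TRACE_GRAPH_RET for
 * @curr (same pid and function), the call is a leaf.  Return the ret
 * entry and advance the iterator past it, so the caller can print the
 * pair as a single "func();" line; return NULL otherwise.
 */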
585 static struct ftrace_graph_ret_entry *
586 get_return_for_leaf(struct trace_iterator *iter,
587 		struct ftrace_graph_ent_entry *curr)
588 {
589 	struct fgraph_data *data = iter->private;
590 	struct ring_buffer_iter *ring_iter = NULL;
591 	struct ring_buffer_event *event;
592 	struct ftrace_graph_ret_entry *next;
593 
594 	/*
595 	 * If the previous output failed to write to the seq buffer,
596 	 * then we just reuse the data from before.
597 	 */
598 	if (data && data->failed) {
599 		curr = &data->ent;
600 		next = &data->ret;
601 	} else {
602 
603 		ring_iter = trace_buffer_iter(iter, iter->cpu);
604 
605 		/* First peek to compare current entry and the next one */
606 		if (ring_iter)
607 			event = ring_buffer_iter_peek(ring_iter, NULL);
608 		else {
609 			/*
610 			 * We need to consume the current entry to see
611 			 * the next one.
612 			 */
613 			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
614 					    NULL, NULL);
615 			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
616 						 NULL, NULL);
617 		}
618 
619 		if (!event)
620 			return NULL;
621 
622 		next = ring_buffer_event_data(event);
623 
624 		if (data) {
625 			/*
626 			 * Save current and next entries for later reference
627 			 * if the output fails.
628 			 */
629 			data->ent = *curr;
630 			/*
631 			 * If the next event is not a return type, then
632 			 * we only care about what type it is. Otherwise we can
633 			 * safely copy the entire event.
634 			 */
635 			if (next->ent.type == TRACE_GRAPH_RET)
636 				data->ret = *next;
637 			else
638 				data->ret.ent.type = next->ent.type;
639 		}
640 	}
641 
642 	if (next->ent.type != TRACE_GRAPH_RET)
643 		return NULL;
644 
645 	if (curr->ent.pid != next->ent.pid ||
646 			curr->graph_ent.func != next->ret.func)
647 		return NULL;
648 
649 	/* this is a leaf, now advance the iterator */
650 	if (ring_iter)
651 		ring_buffer_read(ring_iter, NULL);
652 
653 	return next;
654 }
655 
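/* Print the absolute timestamp as "seconds.microseconds |  " */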
656 static int print_graph_abs_time(u64 t, struct trace_seq *s)
657 {
658 	unsigned long usecs_rem;
659 
660 	usecs_rem = do_div(t, NSEC_PER_SEC);
661 	usecs_rem /= 1000;
662 
663 	return trace_seq_printf(s, "%5lu.%06lu |  ",
664 			(unsigned long)t, usecs_rem);
665 }
666 
667 static enum print_line_t
668 print_graph_irq(struct trace_iterator *iter, unsigned long addr,
669 		enum trace_type type, int cpu, pid_t pid, u32 flags)
670 {
671 	int ret;
672 	struct trace_seq *s = &iter->seq;
673 
674 	if (addr < (unsigned long)__irqentry_text_start ||
675 		addr >= (unsigned long)__irqentry_text_end)
676 		return TRACE_TYPE_UNHANDLED;
677 
678 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
679 		/* Absolute time */
680 		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
681 			ret = print_graph_abs_time(iter->ts, s);
682 			if (!ret)
683 				return TRACE_TYPE_PARTIAL_LINE;
684 		}
685 
686 		/* Cpu */
687 		if (flags & TRACE_GRAPH_PRINT_CPU) {
688 			ret = print_graph_cpu(s, cpu);
689 			if (ret == TRACE_TYPE_PARTIAL_LINE)
690 				return TRACE_TYPE_PARTIAL_LINE;
691 		}
692 
693 		/* Proc */
694 		if (flags & TRACE_GRAPH_PRINT_PROC) {
695 			ret = print_graph_proc(s, pid);
696 			if (ret == TRACE_TYPE_PARTIAL_LINE)
697 				return TRACE_TYPE_PARTIAL_LINE;
698 			ret = trace_seq_puts(s, " | ");
699 			if (!ret)
700 				return TRACE_TYPE_PARTIAL_LINE;
701 		}
702 	}
703 
704 	/* No overhead */
705 	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
706 	if (ret != TRACE_TYPE_HANDLED)
707 		return ret;
708 
709 	if (type == TRACE_GRAPH_ENT)
710 		ret = trace_seq_puts(s, "==========>");
711 	else
712 		ret = trace_seq_puts(s, "<==========");
713 
714 	if (!ret)
715 		return TRACE_TYPE_PARTIAL_LINE;
716 
717 	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
718 	if (ret != TRACE_TYPE_HANDLED)
719 		return ret;
720 
721 	ret = trace_seq_putc(s, '\n');
722 
723 	if (!ret)
724 		return TRACE_TYPE_PARTIAL_LINE;
725 	return TRACE_TYPE_HANDLED;
726 }
727 
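/*
 * Format a duration (in ns) as microseconds: e.g. 12345 ns prints as
 * "12.345 us ", followed by padding to keep the column width constant.
 */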
728 enum print_line_t
729 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
730 {
731 	unsigned long nsecs_rem = do_div(duration, 1000);
732 	/* log10(ULONG_MAX) + '\0' */
733 	char msecs_str[21];
734 	char nsecs_str[5];
735 	int ret, len;
736 	int i;
737 
738 	sprintf(msecs_str, "%lu", (unsigned long) duration);
739 
740 	/* Print msecs */
741 	ret = trace_seq_printf(s, "%s", msecs_str);
742 	if (!ret)
743 		return TRACE_TYPE_PARTIAL_LINE;
744 
745 	len = strlen(msecs_str);
746 
	/* Print nsecs (we don't want to exceed 7 digits) */
748 	if (len < 7) {
749 		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
750 
751 		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
752 		ret = trace_seq_printf(s, ".%s", nsecs_str);
753 		if (!ret)
754 			return TRACE_TYPE_PARTIAL_LINE;
755 		len += strlen(nsecs_str);
756 	}
757 
758 	ret = trace_seq_puts(s, " us ");
759 	if (!ret)
760 		return TRACE_TYPE_PARTIAL_LINE;
761 
762 	/* Print remaining spaces to fit the row's width */
763 	for (i = len; i < 7; i++) {
764 		ret = trace_seq_putc(s, ' ');
765 		if (!ret)
766 			return TRACE_TYPE_PARTIAL_LINE;
767 	}
768 	return TRACE_TYPE_HANDLED;
769 }
770 
771 static enum print_line_t
772 print_graph_duration(unsigned long long duration, struct trace_seq *s,
773 		     u32 flags)
774 {
775 	int ret = -1;
776 
777 	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
778 	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
779 			return TRACE_TYPE_HANDLED;
780 
	/* No real data, just fill the column with spaces */
782 	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
783 	case FLAGS_FILL_FULL:
784 		ret = trace_seq_puts(s, "              |  ");
785 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
786 	case FLAGS_FILL_START:
787 		ret = trace_seq_puts(s, "  ");
788 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
789 	case FLAGS_FILL_END:
790 		ret = trace_seq_puts(s, " |");
791 		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
792 	}
793 
	/* Signal an excessive execution time in the output */
795 	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
796 		/* Duration exceeded 100 msecs */
797 		if (duration > 100000ULL)
798 			ret = trace_seq_puts(s, "! ");
799 		/* Duration exceeded 10 msecs */
800 		else if (duration > 10000ULL)
801 			ret = trace_seq_puts(s, "+ ");
802 	}
803 
804 	/*
805 	 * The -1 means we either did not exceed the duration tresholds
806 	 * or we dont want to print out the overhead. Either way we need
807 	 * to fill out the space.
808 	 */
809 	if (ret == -1)
810 		ret = trace_seq_puts(s, "  ");
811 
	/* Catch any failure that happened above */
813 	if (!ret)
814 		return TRACE_TYPE_PARTIAL_LINE;
815 
816 	ret = trace_print_graph_duration(duration, s);
817 	if (ret != TRACE_TYPE_HANDLED)
818 		return ret;
819 
820 	ret = trace_seq_puts(s, "|  ");
821 	if (!ret)
822 		return TRACE_TYPE_PARTIAL_LINE;
823 
824 	return TRACE_TYPE_HANDLED;
825 }
826 
827 /* Case of a leaf function on its call entry */
828 static enum print_line_t
829 print_graph_entry_leaf(struct trace_iterator *iter,
830 		struct ftrace_graph_ent_entry *entry,
831 		struct ftrace_graph_ret_entry *ret_entry,
832 		struct trace_seq *s, u32 flags)
833 {
834 	struct fgraph_data *data = iter->private;
835 	struct ftrace_graph_ret *graph_ret;
836 	struct ftrace_graph_ent *call;
837 	unsigned long long duration;
838 	int ret;
839 	int i;
840 
841 	graph_ret = &ret_entry->ret;
842 	call = &entry->graph_ent;
843 	duration = graph_ret->rettime - graph_ret->calltime;
844 
845 	if (data) {
846 		struct fgraph_cpu_data *cpu_data;
847 		int cpu = iter->cpu;
848 
849 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
850 
851 		/*
852 		 * Comments display at + 1 to depth. Since
853 		 * this is a leaf function, keep the comments
854 		 * equal to this depth.
855 		 */
856 		cpu_data->depth = call->depth - 1;
857 
858 		/* No need to keep this function around for this depth */
859 		if (call->depth < FTRACE_RETFUNC_DEPTH)
860 			cpu_data->enter_funcs[call->depth] = 0;
861 	}
862 
863 	/* Overhead and duration */
864 	ret = print_graph_duration(duration, s, flags);
865 	if (ret == TRACE_TYPE_PARTIAL_LINE)
866 		return TRACE_TYPE_PARTIAL_LINE;
867 
868 	/* Function */
869 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
870 		ret = trace_seq_putc(s, ' ');
871 		if (!ret)
872 			return TRACE_TYPE_PARTIAL_LINE;
873 	}
874 
875 	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
876 	if (!ret)
877 		return TRACE_TYPE_PARTIAL_LINE;
878 
879 	return TRACE_TYPE_HANDLED;
880 }
881 
882 static enum print_line_t
883 print_graph_entry_nested(struct trace_iterator *iter,
884 			 struct ftrace_graph_ent_entry *entry,
885 			 struct trace_seq *s, int cpu, u32 flags)
886 {
887 	struct ftrace_graph_ent *call = &entry->graph_ent;
888 	struct fgraph_data *data = iter->private;
889 	int ret;
890 	int i;
891 
892 	if (data) {
893 		struct fgraph_cpu_data *cpu_data;
894 		int cpu = iter->cpu;
895 
896 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
897 		cpu_data->depth = call->depth;
898 
899 		/* Save this function pointer to see if the exit matches */
900 		if (call->depth < FTRACE_RETFUNC_DEPTH)
901 			cpu_data->enter_funcs[call->depth] = call->func;
902 	}
903 
904 	/* No time */
905 	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
906 	if (ret != TRACE_TYPE_HANDLED)
907 		return ret;
908 
909 	/* Function */
910 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
911 		ret = trace_seq_putc(s, ' ');
912 		if (!ret)
913 			return TRACE_TYPE_PARTIAL_LINE;
914 	}
915 
916 	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
917 	if (!ret)
918 		return TRACE_TYPE_PARTIAL_LINE;
919 
920 	/*
921 	 * we already consumed the current entry to check the next one
922 	 * and see if this is a leaf.
923 	 */
924 	return TRACE_TYPE_NO_CONSUME;
925 }
926 
927 static enum print_line_t
928 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
929 		     int type, unsigned long addr, u32 flags)
930 {
931 	struct fgraph_data *data = iter->private;
932 	struct trace_entry *ent = iter->ent;
933 	int cpu = iter->cpu;
934 	int ret;
935 
936 	/* Pid */
937 	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
938 		return TRACE_TYPE_PARTIAL_LINE;
939 
940 	if (type) {
941 		/* Interrupt */
942 		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
943 		if (ret == TRACE_TYPE_PARTIAL_LINE)
944 			return TRACE_TYPE_PARTIAL_LINE;
945 	}
946 
947 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
948 		return 0;
949 
950 	/* Absolute time */
951 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
952 		ret = print_graph_abs_time(iter->ts, s);
953 		if (!ret)
954 			return TRACE_TYPE_PARTIAL_LINE;
955 	}
956 
957 	/* Cpu */
958 	if (flags & TRACE_GRAPH_PRINT_CPU) {
959 		ret = print_graph_cpu(s, cpu);
960 		if (ret == TRACE_TYPE_PARTIAL_LINE)
961 			return TRACE_TYPE_PARTIAL_LINE;
962 	}
963 
964 	/* Proc */
965 	if (flags & TRACE_GRAPH_PRINT_PROC) {
966 		ret = print_graph_proc(s, ent->pid);
967 		if (ret == TRACE_TYPE_PARTIAL_LINE)
968 			return TRACE_TYPE_PARTIAL_LINE;
969 
970 		ret = trace_seq_puts(s, " | ");
971 		if (!ret)
972 			return TRACE_TYPE_PARTIAL_LINE;
973 	}
974 
975 	/* Latency format */
976 	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
977 		ret = print_graph_lat_fmt(s, ent);
978 		if (ret == TRACE_TYPE_PARTIAL_LINE)
979 			return TRACE_TYPE_PARTIAL_LINE;
980 	}
981 
982 	return 0;
983 }
984 
985 /*
986  * Entry check for irq code
987  *
988  * returns 1 if
989  *  - we are inside irq code
990  *  - we just entered irq code
991  *
992  * retunns 0 if
993  *  - funcgraph-interrupts option is set
994  *  - we are not inside irq code
995  */
996 static int
997 check_irq_entry(struct trace_iterator *iter, u32 flags,
998 		unsigned long addr, int depth)
999 {
1000 	int cpu = iter->cpu;
1001 	int *depth_irq;
1002 	struct fgraph_data *data = iter->private;
1003 
1004 	/*
1005 	 * If we are either displaying irqs, or we got called as
1006 	 * a graph event and private data does not exist,
1007 	 * then we bypass the irq check.
1008 	 */
1009 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1010 	    (!data))
1011 		return 0;
1012 
1013 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1014 
1015 	/*
1016 	 * We are inside the irq code
1017 	 */
1018 	if (*depth_irq >= 0)
1019 		return 1;
1020 
1021 	if ((addr < (unsigned long)__irqentry_text_start) ||
1022 	    (addr >= (unsigned long)__irqentry_text_end))
1023 		return 0;
1024 
1025 	/*
1026 	 * We are entering irq code.
1027 	 */
1028 	*depth_irq = depth;
1029 	return 1;
1030 }
1031 
1032 /*
1033  * Return check for irq code
1034  *
1035  * returns 1 if
1036  *  - we are inside irq code
1037  *  - we just left irq code
1038  *
1039  * returns 0 if
 *  - the funcgraph-irqs option is set
1041  *  - we are not inside irq code
1042  */
1043 static int
1044 check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1045 {
1046 	int cpu = iter->cpu;
1047 	int *depth_irq;
1048 	struct fgraph_data *data = iter->private;
1049 
1050 	/*
1051 	 * If we are either displaying irqs, or we got called as
1052 	 * a graph event and private data does not exist,
1053 	 * then we bypass the irq check.
1054 	 */
1055 	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1056 	    (!data))
1057 		return 0;
1058 
1059 	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1060 
1061 	/*
1062 	 * We are not inside the irq code.
1063 	 */
1064 	if (*depth_irq == -1)
1065 		return 0;
1066 
1067 	/*
1068 	 * We are inside the irq code, and this is returning entry.
1069 	 * Let's not trace it and clear the entry depth, since
1070 	 * we are out of irq code.
1071 	 *
1072 	 * This condition ensures that we 'leave the irq code' once
1073 	 * we are out of the entry depth. Thus protecting us from
1074 	 * the RETURN entry loss.
1075 	 */
1076 	if (*depth_irq >= depth) {
1077 		*depth_irq = -1;
1078 		return 1;
1079 	}
1080 
1081 	/*
1082 	 * We are inside the irq code, and this is not the entry.
1083 	 */
1084 	return 1;
1085 }
1086 
1087 static enum print_line_t
1088 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1089 			struct trace_iterator *iter, u32 flags)
1090 {
1091 	struct fgraph_data *data = iter->private;
1092 	struct ftrace_graph_ent *call = &field->graph_ent;
1093 	struct ftrace_graph_ret_entry *leaf_ret;
1094 	static enum print_line_t ret;
1095 	int cpu = iter->cpu;
1096 
1097 	if (check_irq_entry(iter, flags, call->func, call->depth))
1098 		return TRACE_TYPE_HANDLED;
1099 
1100 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
1101 		return TRACE_TYPE_PARTIAL_LINE;
1102 
1103 	leaf_ret = get_return_for_leaf(iter, field);
1104 	if (leaf_ret)
1105 		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1106 	else
1107 		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1108 
1109 	if (data) {
1110 		/*
1111 		 * If we failed to write our output, then we need to make
1112 		 * note of it. Because we already consumed our entry.
1113 		 */
1114 		if (s->full) {
1115 			data->failed = 1;
1116 			data->cpu = cpu;
1117 		} else
1118 			data->failed = 0;
1119 	}
1120 
1121 	return ret;
1122 }
1123 
1124 static enum print_line_t
1125 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1126 		   struct trace_entry *ent, struct trace_iterator *iter,
1127 		   u32 flags)
1128 {
1129 	unsigned long long duration = trace->rettime - trace->calltime;
1130 	struct fgraph_data *data = iter->private;
1131 	pid_t pid = ent->pid;
1132 	int cpu = iter->cpu;
1133 	int func_match = 1;
1134 	int ret;
1135 	int i;
1136 
1137 	if (check_irq_return(iter, flags, trace->depth))
1138 		return TRACE_TYPE_HANDLED;
1139 
1140 	if (data) {
1141 		struct fgraph_cpu_data *cpu_data;
1142 		int cpu = iter->cpu;
1143 
1144 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1145 
1146 		/*
1147 		 * Comments display at + 1 to depth. This is the
1148 		 * return from a function, we now want the comments
1149 		 * to display at the same level of the bracket.
1150 		 */
1151 		cpu_data->depth = trace->depth - 1;
1152 
1153 		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1154 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1155 				func_match = 0;
1156 			cpu_data->enter_funcs[trace->depth] = 0;
1157 		}
1158 	}
1159 
1160 	if (print_graph_prologue(iter, s, 0, 0, flags))
1161 		return TRACE_TYPE_PARTIAL_LINE;
1162 
1163 	/* Overhead and duration */
1164 	ret = print_graph_duration(duration, s, flags);
1165 	if (ret == TRACE_TYPE_PARTIAL_LINE)
1166 		return TRACE_TYPE_PARTIAL_LINE;
1167 
1168 	/* Closing brace */
1169 	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1170 		ret = trace_seq_putc(s, ' ');
1171 		if (!ret)
1172 			return TRACE_TYPE_PARTIAL_LINE;
1173 	}
1174 
1175 	/*
1176 	 * If the return function does not have a matching entry,
1177 	 * then the entry was lost. Instead of just printing
1178 	 * the '}' and letting the user guess what function this
1179 	 * belongs to, write out the function name.
1180 	 */
1181 	if (func_match) {
1182 		ret = trace_seq_puts(s, "}\n");
1183 		if (!ret)
1184 			return TRACE_TYPE_PARTIAL_LINE;
1185 	} else {
1186 		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1187 		if (!ret)
1188 			return TRACE_TYPE_PARTIAL_LINE;
1189 	}
1190 
1191 	/* Overrun */
1192 	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
1193 		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
1194 					trace->overrun);
1195 		if (!ret)
1196 			return TRACE_TYPE_PARTIAL_LINE;
1197 	}
1198 
1199 	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1200 			      cpu, pid, flags);
1201 	if (ret == TRACE_TYPE_PARTIAL_LINE)
1202 		return TRACE_TYPE_PARTIAL_LINE;
1203 
1204 	return TRACE_TYPE_HANDLED;
1205 }
1206 
1207 static enum print_line_t
1208 print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1209 		    struct trace_iterator *iter, u32 flags)
1210 {
1211 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1212 	struct fgraph_data *data = iter->private;
1213 	struct trace_event *event;
1214 	int depth = 0;
1215 	int ret;
1216 	int i;
1217 
1218 	if (data)
1219 		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1220 
1221 	if (print_graph_prologue(iter, s, 0, 0, flags))
1222 		return TRACE_TYPE_PARTIAL_LINE;
1223 
1224 	/* No time */
1225 	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
1226 	if (ret != TRACE_TYPE_HANDLED)
1227 		return ret;
1228 
1229 	/* Indentation */
1230 	if (depth > 0)
1231 		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1232 			ret = trace_seq_putc(s, ' ');
1233 			if (!ret)
1234 				return TRACE_TYPE_PARTIAL_LINE;
1235 		}
1236 
1237 	/* The comment */
1238 	ret = trace_seq_puts(s, "/* ");
1239 	if (!ret)
1240 		return TRACE_TYPE_PARTIAL_LINE;
1241 
1242 	switch (iter->ent->type) {
1243 	case TRACE_BPRINT:
1244 		ret = trace_print_bprintk_msg_only(iter);
1245 		if (ret != TRACE_TYPE_HANDLED)
1246 			return ret;
1247 		break;
1248 	case TRACE_PRINT:
1249 		ret = trace_print_printk_msg_only(iter);
1250 		if (ret != TRACE_TYPE_HANDLED)
1251 			return ret;
1252 		break;
1253 	default:
1254 		event = ftrace_find_event(ent->type);
1255 		if (!event)
1256 			return TRACE_TYPE_UNHANDLED;
1257 
1258 		ret = event->funcs->trace(iter, sym_flags, event);
1259 		if (ret != TRACE_TYPE_HANDLED)
1260 			return ret;
1261 	}
1262 
1263 	/* Strip ending newline */
1264 	if (s->buffer[s->len - 1] == '\n') {
1265 		s->buffer[s->len - 1] = '\0';
1266 		s->len--;
1267 	}
1268 
1269 	ret = trace_seq_puts(s, " */\n");
1270 	if (!ret)
1271 		return TRACE_TYPE_PARTIAL_LINE;
1272 
1273 	return TRACE_TYPE_HANDLED;
1274 }
1275 
1276 
1277 enum print_line_t
1278 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1279 {
1280 	struct ftrace_graph_ent_entry *field;
1281 	struct fgraph_data *data = iter->private;
1282 	struct trace_entry *entry = iter->ent;
1283 	struct trace_seq *s = &iter->seq;
1284 	int cpu = iter->cpu;
1285 	int ret;
1286 
1287 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1288 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1289 		return TRACE_TYPE_HANDLED;
1290 	}
1291 
1292 	/*
1293 	 * If the last output failed, there's a possibility we need
1294 	 * to print out the missing entry which would never go out.
1295 	 */
1296 	if (data && data->failed) {
1297 		field = &data->ent;
1298 		iter->cpu = data->cpu;
1299 		ret = print_graph_entry(field, s, iter, flags);
1300 		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1301 			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1302 			ret = TRACE_TYPE_NO_CONSUME;
1303 		}
1304 		iter->cpu = cpu;
1305 		return ret;
1306 	}
1307 
1308 	switch (entry->type) {
1309 	case TRACE_GRAPH_ENT: {
1310 		/*
1311 		 * print_graph_entry() may consume the current event,
1312 		 * thus @field may become invalid, so we need to save it.
1313 		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1314 		 * it can be safely saved at the stack.
1315 		 */
1316 		struct ftrace_graph_ent_entry saved;
1317 		trace_assign_type(field, entry);
1318 		saved = *field;
1319 		return print_graph_entry(&saved, s, iter, flags);
1320 	}
1321 	case TRACE_GRAPH_RET: {
1322 		struct ftrace_graph_ret_entry *field;
1323 		trace_assign_type(field, entry);
1324 		return print_graph_return(&field->ret, s, entry, iter, flags);
1325 	}
1326 	case TRACE_STACK:
1327 	case TRACE_FN:
		/* don't trace stack and function entries as comments */
1329 		return TRACE_TYPE_UNHANDLED;
1330 
1331 	default:
1332 		return print_graph_comment(s, entry, iter, flags);
1333 	}
1334 
1335 	return TRACE_TYPE_HANDLED;
1336 }
1337 
1338 static enum print_line_t
1339 print_graph_function(struct trace_iterator *iter)
1340 {
1341 	return print_graph_function_flags(iter, tracer_flags.val);
1342 }
1343 
1344 static enum print_line_t
1345 print_graph_function_event(struct trace_iterator *iter, int flags,
1346 			   struct trace_event *event)
1347 {
1348 	return print_graph_function(iter);
1349 }
1350 
1351 static void print_lat_header(struct seq_file *s, u32 flags)
1352 {
1353 	static const char spaces[] = "                "	/* 16 spaces */
1354 		"    "					/* 4 spaces */
1355 		"                 ";			/* 17 spaces */
1356 	int size = 0;
1357 
1358 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1359 		size += 16;
1360 	if (flags & TRACE_GRAPH_PRINT_CPU)
1361 		size += 4;
1362 	if (flags & TRACE_GRAPH_PRINT_PROC)
1363 		size += 17;
1364 
1365 	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1366 	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1367 	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1368 	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1369 	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1370 }
1371 
1372 static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1373 {
1374 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1375 
1376 	if (lat)
1377 		print_lat_header(s, flags);
1378 
1379 	/* 1st line */
1380 	seq_printf(s, "#");
1381 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1382 		seq_printf(s, "     TIME       ");
1383 	if (flags & TRACE_GRAPH_PRINT_CPU)
1384 		seq_printf(s, " CPU");
1385 	if (flags & TRACE_GRAPH_PRINT_PROC)
1386 		seq_printf(s, "  TASK/PID       ");
1387 	if (lat)
1388 		seq_printf(s, "||||");
1389 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1390 		seq_printf(s, "  DURATION   ");
1391 	seq_printf(s, "               FUNCTION CALLS\n");
1392 
1393 	/* 2nd line */
1394 	seq_printf(s, "#");
1395 	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1396 		seq_printf(s, "      |         ");
1397 	if (flags & TRACE_GRAPH_PRINT_CPU)
1398 		seq_printf(s, " |  ");
1399 	if (flags & TRACE_GRAPH_PRINT_PROC)
1400 		seq_printf(s, "   |    |        ");
1401 	if (lat)
1402 		seq_printf(s, "||||");
1403 	if (flags & TRACE_GRAPH_PRINT_DURATION)
1404 		seq_printf(s, "   |   |      ");
1405 	seq_printf(s, "               |   |   |   |\n");
1406 }
1407 
1408 void print_graph_headers(struct seq_file *s)
1409 {
1410 	print_graph_headers_flags(s, tracer_flags.val);
1411 }
1412 
1413 void print_graph_headers_flags(struct seq_file *s, u32 flags)
1414 {
1415 	struct trace_iterator *iter = s->private;
1416 
1417 	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
1418 		return;
1419 
1420 	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
1421 		/* print nothing if the buffers are empty */
1422 		if (trace_empty(iter))
1423 			return;
1424 
1425 		print_trace_header(s, iter);
1426 	}
1427 
1428 	__print_graph_headers_flags(s, flags);
1429 }
1430 
1431 void graph_trace_open(struct trace_iterator *iter)
1432 {
1433 	/* pid and depth on the last trace processed */
1434 	struct fgraph_data *data;
1435 	int cpu;
1436 
1437 	iter->private = NULL;
1438 
1439 	data = kzalloc(sizeof(*data), GFP_KERNEL);
1440 	if (!data)
1441 		goto out_err;
1442 
1443 	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1444 	if (!data->cpu_data)
1445 		goto out_err_free;
1446 
1447 	for_each_possible_cpu(cpu) {
1448 		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1449 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1450 		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1451 		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1452 
1453 		*pid = -1;
1454 		*depth = 0;
1455 		*ignore = 0;
1456 		*depth_irq = -1;
1457 	}
1458 
1459 	iter->private = data;
1460 
1461 	return;
1462 
1463  out_err_free:
1464 	kfree(data);
1465  out_err:
1466 	pr_warning("function graph tracer: not enough memory\n");
1467 }
1468 
1469 void graph_trace_close(struct trace_iterator *iter)
1470 {
1471 	struct fgraph_data *data = iter->private;
1472 
1473 	if (data) {
1474 		free_percpu(data->cpu_data);
1475 		kfree(data);
1476 	}
1477 }
1478 
1479 static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
1480 {
1481 	if (bit == TRACE_GRAPH_PRINT_IRQS)
1482 		ftrace_graph_skip_irqs = !set;
1483 
1484 	return 0;
1485 }
1486 
1487 static struct trace_event_functions graph_functions = {
1488 	.trace		= print_graph_function_event,
1489 };
1490 
1491 static struct trace_event graph_trace_entry_event = {
1492 	.type		= TRACE_GRAPH_ENT,
1493 	.funcs		= &graph_functions,
1494 };
1495 
1496 static struct trace_event graph_trace_ret_event = {
1497 	.type		= TRACE_GRAPH_RET,
1498 	.funcs		= &graph_functions
1499 };
1500 
1501 static struct tracer graph_trace __tracer_data = {
1502 	.name		= "function_graph",
1503 	.open		= graph_trace_open,
1504 	.pipe_open	= graph_trace_open,
1505 	.close		= graph_trace_close,
1506 	.pipe_close	= graph_trace_close,
1507 	.wait_pipe	= poll_wait_pipe,
1508 	.init		= graph_trace_init,
1509 	.reset		= graph_trace_reset,
1510 	.print_line	= print_graph_function,
1511 	.print_header	= print_graph_headers,
1512 	.flags		= &tracer_flags,
1513 	.set_flag	= func_graph_set_flag,
1514 #ifdef CONFIG_FTRACE_SELFTEST
1515 	.selftest	= trace_selftest_startup_function_graph,
1516 #endif
1517 };
1518 
1519 
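/*
 * "max_graph_depth" control file: a depth of 0 means no limit (see the
 * max_depth check in trace_graph_entry()).
 */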
1520 static ssize_t
1521 graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1522 		  loff_t *ppos)
1523 {
1524 	unsigned long val;
1525 	int ret;
1526 
1527 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1528 	if (ret)
1529 		return ret;
1530 
1531 	max_depth = val;
1532 
1533 	*ppos += cnt;
1534 
1535 	return cnt;
1536 }
1537 
1538 static ssize_t
1539 graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1540 		 loff_t *ppos)
1541 {
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1543 	int n;
1544 
1545 	n = sprintf(buf, "%d\n", max_depth);
1546 
1547 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1548 }
1549 
1550 static const struct file_operations graph_depth_fops = {
1551 	.open		= tracing_open_generic,
1552 	.write		= graph_depth_write,
1553 	.read		= graph_depth_read,
1554 	.llseek		= generic_file_llseek,
1555 };
1556 
1557 static __init int init_graph_debugfs(void)
1558 {
1559 	struct dentry *d_tracer;
1560 
1561 	d_tracer = tracing_init_dentry();
1562 	if (!d_tracer)
1563 		return 0;
1564 
1565 	trace_create_file("max_graph_depth", 0644, d_tracer,
1566 			  NULL, &graph_depth_fops);
1567 
1568 	return 0;
1569 }
1570 fs_initcall(init_graph_debugfs);
1571 
1572 static __init int init_graph_trace(void)
1573 {
1574 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1575 
1576 	if (!register_ftrace_event(&graph_trace_entry_event)) {
1577 		pr_warning("Warning: could not register graph trace events\n");
1578 		return 1;
1579 	}
1580 
1581 	if (!register_ftrace_event(&graph_trace_ret_event)) {
1582 		pr_warning("Warning: could not register graph trace events\n");
1583 		return 1;
1584 	}
1585 
1586 	return register_tracer(&graph_trace);
1587 }
1588 
1589 core_initcall(init_graph_trace);
1590