xref: /openbmc/linux/kernel/trace/trace_kdb.c (revision a06c488d)
/*
 * kdb helper for dumping the ftrace buffer
 *
 * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
 *
 * ftrace_dump_buf based on ftrace_dump:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 */
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/ftrace.h>

#include "trace.h"
#include "trace_output.h"

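/*
 * Dump the contents of the ftrace ring buffer to the kdb console,
 * skipping the first skip_lines entries and limiting the dump to
 * cpu_file unless it is RING_BUFFER_ALL_CPUS.
 */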
static void ftrace_dump_buf(int skip_lines, long cpu_file)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

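	/*
	 * Bind the iterator to the global trace array and hand it the
	 * static per-CPU buffer iterator slots declared above.
	 */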
	trace_init_global_iter(&iter);
	iter.buffer_iter = buffer_iter;
	tr = iter.tr;

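	/* Keep new trace entries from being recorded while kdb reads the buffers. */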
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
		   sizeof(struct trace_iterator) -
		   offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

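	/*
	 * Prepare and start a ring buffer read iterator either for every
	 * tracing CPU or for just the single CPU that was asked for.
	 */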
	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

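	/*
	 * Walk the buffer entry by entry, printing each one unless it is
	 * still being skipped; an interrupted kdb command aborts the dump.
	 */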
	while (trace_find_next_entry_inc(&iter)) {
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		if (!skip_lines) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_lines--;
		}

		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf("   (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
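	/*
	 * Restore the saved trace flags, re-enable recording on every CPU
	 * and release any ring buffer iterators set up above.
	 */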
	tr->trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}

/*
 * kdb_ftdump - Dump the ftrace log buffer
 *
 * Invoked from the kdb shell as:
 *	ftdump [skip_#lines] [cpu]
 * With no arguments the entire buffer is dumped for all CPUs.
 */
static int kdb_ftdump(int argc, const char **argv)
{
	int skip_lines = 0;
	long cpu_file;
	char *cp;

	if (argc > 2)
		return KDB_ARGCOUNT;

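	/* First optional argument: number of leading trace lines to skip. */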
	if (argc) {
		skip_lines = simple_strtol(argv[1], &cp, 0);
		if (*cp)
			skip_lines = 0;
	}

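	/* Second optional argument: restrict the dump to a single online CPU. */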
	if (argc == 2) {
		cpu_file = simple_strtol(argv[2], &cp, 0);
		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
		cpu_file = RING_BUFFER_ALL_CPUS;
	}

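	/* Route printk() output from the trace code to the kdb console while dumping. */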
	kdb_trap_printk++;
	ftrace_dump_buf(skip_lines, cpu_file);
	kdb_trap_printk--;

	return 0;
}

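/* Register the 'ftdump' command with kdb; runs as a late initcall. */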
static __init int kdb_ftrace_register(void)
{
	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
	return 0;
}

late_initcall(kdb_ftrace_register);