xref: /openbmc/linux/kernel/trace/trace_kdb.c (revision d2574c33)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * kdb helper for dumping the ftrace buffer
4  *
5  * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
6  *
7  * ftrace_dump_buf based on ftrace_dump:
8  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
9  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
10  *
11  */
12 #include <linux/init.h>
13 #include <linux/kgdb.h>
14 #include <linux/kdb.h>
15 #include <linux/ftrace.h>
16 
17 #include "trace.h"
18 #include "trace_output.h"
19 
/*
 * ftrace_dump_buf - print the contents of the ftrace ring buffer via kdb
 * @skip_lines: number of leading trace entries to consume without printing
 * @cpu_file:   a single CPU number, or RING_BUFFER_ALL_CPUS for every CPU
 *
 * Runs in kdb context (kernel stopped), so it takes no locks, allocates
 * with GFP_ATOMIC, and temporarily disables per-cpu tracing while reading.
 */
static void ftrace_dump_buf(int skip_lines, long cpu_file)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
	struct trace_array *tr;
	unsigned int old_userobj;
	int cnt = 0, cpu;

	trace_init_global_iter(&iter);
	/* Point the iterator at our static per-cpu iterator array. */
	iter.buffer_iter = buffer_iter;
	tr = iter.tr;

	/* Stop new entries from being recorded while we walk the buffer. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* Saved so the flag bits can be restored verbatim at "out". */
	old_userobj = tr->trace_flags;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	kdb_printf("Dumping ftrace buffer:\n");

	/* reset all but tr, trace, and overruns */
	memset(&iter.seq, 0,
		   sizeof(struct trace_iterator) -
		   offsetof(struct trace_iterator, seq));
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		/* Prepare one read iterator per tracing CPU. */
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu, GFP_ATOMIC);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		/* Single-CPU dump: only set up that CPU's iterator. */
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
			ring_buffer_read_prepare(iter.trace_buffer->buffer,
						 cpu_file, GFP_ATOMIC);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}

	while (trace_find_next_entry_inc(&iter)) {
		/* Separator printed once, before the first entry. */
		if (!cnt)
			kdb_printf("---------------------------------\n");
		cnt++;

		/*
		 * Skipped entries are still consumed from the buffer and
		 * still counted in cnt; they are just not printed.
		 */
		if (!skip_lines) {
			print_trace_line(&iter);
			trace_printk_seq(&iter.seq);
		} else {
			skip_lines--;
		}

		/* Let the operator abort a long dump from the kdb console. */
		if (KDB_FLAG(CMD_INTERRUPT))
			goto out;
	}

	if (!cnt)
		kdb_printf("   (ftrace buffer empty)\n");
	else
		kdb_printf("---------------------------------\n");

out:
	/* Restore flags and re-enable tracing in reverse order of setup. */
	tr->trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* Release every iterator we prepared; iterate all CPUs since the
	 * unused slots are NULL either way. */
	for_each_tracing_cpu(cpu) {
		if (iter.buffer_iter[cpu]) {
			ring_buffer_read_finish(iter.buffer_iter[cpu]);
			iter.buffer_iter[cpu] = NULL;
		}
	}
}
103 
104 /*
105  * kdb_ftdump - Dump the ftrace log buffer
106  */
107 static int kdb_ftdump(int argc, const char **argv)
108 {
109 	int skip_lines = 0;
110 	long cpu_file;
111 	char *cp;
112 
113 	if (argc > 2)
114 		return KDB_ARGCOUNT;
115 
116 	if (argc) {
117 		skip_lines = simple_strtol(argv[1], &cp, 0);
118 		if (*cp)
119 			skip_lines = 0;
120 	}
121 
122 	if (argc == 2) {
123 		cpu_file = simple_strtol(argv[2], &cp, 0);
124 		if (*cp || cpu_file >= NR_CPUS || cpu_file < 0 ||
125 		    !cpu_online(cpu_file))
126 			return KDB_BADINT;
127 	} else {
128 		cpu_file = RING_BUFFER_ALL_CPUS;
129 	}
130 
131 	kdb_trap_printk++;
132 	ftrace_dump_buf(skip_lines, cpu_file);
133 	kdb_trap_printk--;
134 
135 	return 0;
136 }
137 
138 static __init int kdb_ftrace_register(void)
139 {
140 	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
141 			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
142 	return 0;
143 }
144 
/* Register the command late in boot, after core subsystems are up. */
late_initcall(kdb_ftrace_register);
146