xref: /openbmc/linux/kernel/trace/trace_boot.c (revision 78c99ba1)
/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
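
/*
 * The boot tracer records a trace event around each initcall run after the
 * pre-SMP initcalls (see trace_boot_call()/trace_boot_ret() below) and can
 * optionally record sched switch events in between.
 */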

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"

/* The trace array used by the boot tracer, set by boot_trace_init() */
static struct trace_array *boot_trace;
/* Set by start_boot_trace() once the pre-SMP initcalls have completed */
static bool pre_initcalls_finished;

/*
 * Tells the boot tracer that the pre_smp_initcalls have finished, so we
 * are ready to record. It does not enable sched switch event recording,
 * however; enable_boot_trace() must be called for that.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}

/* Start recording sched switch events between initcalls */
void enable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_start_sched_switch_record();
}

/* Stop recording sched switch events */
void disable_boot_trace(void)
{
	if (boot_trace && pre_initcalls_finished)
		tracing_stop_sched_switch_record();
}

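/*
 * Called when the "initcall" tracer is selected: remember the trace array,
 * reset the per-cpu ring buffers and hand the array over to the sched
 * switch recorder.
 */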
static int boot_trace_init(struct trace_array *tr)
{
	int cpu;
	boot_trace = tr;

	if (!tr)
		return 0;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

	tracing_sched_switch_assign_trace(tr);
	return 0;
}

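/*
 * Print one TRACE_BOOT_CALL entry as
 * "[   sec.nanosec] calling  <func> @ <caller pid>".
 */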
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_call *field;
	struct boot_trace_call *call;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	call = &field->boot_call;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] calling  %s @ %i\n",
			(unsigned long)ts, nsec_rem, call->func, call->caller);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}

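/*
 * Print one TRACE_BOOT_RET entry: the initcall's name, its return value
 * and how long it took.
 */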
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_ret *field;
	struct boot_trace_ret *init_ret;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	init_ret = &field->boot_ret;
	ts = iter->ts;
	nsec_rem = do_div(ts, NSEC_PER_SEC);

	ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
			"returned %d after %llu msecs\n",
			(unsigned long) ts,
			nsec_rem,
			init_ret->func, init_ret->result, init_ret->duration);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}

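/*
 * Dispatch on the entry type; anything that is not an initcall call/return
 * event (e.g. a sched switch record) is left to the other output handlers.
 */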
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_BOOT_CALL:
		return initcall_call_print_line(iter);
	case TRACE_BOOT_RET:
		return initcall_ret_print_line(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

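/*
 * The boot tracer is exposed under the name "initcall"; it is typically
 * selected by writing that name to the tracing current_tracer file.
 */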
struct tracer boot_tracer __read_mostly =
{
	.name		= "initcall",
	.init		= boot_trace_init,
	.reset		= tracing_reset_online_cpus,
	.print_line	= initcall_print_line,
};

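/*
 * Record a TRACE_BOOT_CALL event just before an initcall runs. Nothing is
 * recorded until the pre-SMP initcalls have finished.
 */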
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_call *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section.
	 */
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->boot_call = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}

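/*
 * Record a TRACE_BOOT_RET event once an initcall has returned, carrying
 * its return value and duration.
 */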
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_ret *entry;
	struct trace_array *tr = boot_trace;

	if (!tr || !pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
					  sizeof(*entry), 0, 0);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	entry->boot_ret = *bt;
	trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
	preempt_enable();
}