/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

#include "trace.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/* Tells the boot tracer that the pre_smp_initcalls are finished,
 * so we are ready to record initcall events.
 * It doesn't enable sched events tracing, however;
 * you have to call enable_boot_trace() for that.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}

void enable_boot_trace(void)
{
	if (pre_initcalls_finished)
		tracing_start_sched_switch_record();
}

void disable_boot_trace(void)
{
	if (pre_initcalls_finished)
		tracing_stop_sched_switch_record();
}

static int boot_trace_init(struct trace_array *tr)
{
	int cpu;
	boot_trace = tr;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);

	tracing_sched_switch_assign_trace(tr);
	return 0;
}

static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_call *field;
	struct boot_trace_call *call;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	call = &field->boot_call;
	ts = iter->ts;
	nsec_rem = do_div(ts, 1000000000);

	ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
			(unsigned long)ts, nsec_rem, call->func, call->caller);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}

static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct trace_boot_ret *field;
	struct boot_trace_ret *init_ret;
	u64 ts;
	unsigned long nsec_rem;
	int ret;

	trace_assign_type(field, entry);
	init_ret = &field->boot_ret;
	ts = iter->ts;
	nsec_rem = do_div(ts, 1000000000);

	ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
			"returned %d after %llu msecs\n",
			(unsigned long) ts,
			nsec_rem,
			init_ret->func, init_ret->result, init_ret->duration);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	else
		return TRACE_TYPE_HANDLED;
}

static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_BOOT_CALL:
		return initcall_call_print_line(iter);
	case TRACE_BOOT_RET:
		return initcall_ret_print_line(iter);
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

struct tracer boot_tracer __read_mostly =
{
	.name		= "initcall",
	.init		= boot_trace_init,
	.reset		= tracing_reset_online_cpus,
	.print_line	= initcall_print_line,
};

void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_call *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!pre_initcalls_finished)
		return;

	/* Get its name now since this function could
	 * disappear because it is in the .init section.
	 */
	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT_CALL;
	entry->boot_call = *bt;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}

void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
	struct ring_buffer_event *event;
	struct trace_boot_ret *entry;
	unsigned long irq_flags;
	struct trace_array *tr = boot_trace;

	if (!pre_initcalls_finished)
		return;

	sprint_symbol(bt->func, (unsigned long)fn);
	preempt_disable();

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_BOOT_RET;
	entry->boot_ret = *bt;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
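/*
 * Illustrative sketch (not part of the original file): roughly how the
 * hooks above are meant to be driven from the initcall machinery once
 * initcall debugging is enabled.  The helper name run_one_initcall() and
 * the exact timing/duration handling are assumptions for illustration;
 * only the field names (caller, result, duration) and the call sequence
 * are taken from the code above.
 *
 *	static int __init run_one_initcall(initcall_t fn)
 *	{
 *		struct boot_trace_call call;
 *		struct boot_trace_ret ret;
 *		ktime_t t0, t1;
 *
 *		call.caller = task_pid_nr(current);
 *		trace_boot_call(&call, fn);
 *		enable_boot_trace();
 *
 *		t0 = ktime_get();
 *		ret.result = fn();
 *		t1 = ktime_get();
 *
 *		disable_boot_trace();
 *		ret.duration = ktime_to_ns(ktime_sub(t1, t0)) / NSEC_PER_MSEC;
 *		trace_boot_ret(&ret, fn);
 *
 *		return ret.result;
 *	}
 *
 * start_boot_trace() itself is expected to be called once, after the
 * pre-SMP initcalls have run, so that the two hooks start recording.
 */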