// SPDX-License-Identifier: GPL-2.0
/*
 * trace_boot.c
 * Tracing kernel boot-time
 */

#define pr_fmt(fmt)	"trace_boot: " fmt

#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

#include "trace.h"

#define MAX_BUF_LEN 256

static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char buf[MAX_BUF_LEN];
	unsigned long v = 0;

	/* Common ftrace options */
	xbc_node_for_each_array_value(node, "options", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (trace_set_options(tr, buf) < 0)
			pr_err("Failed to set option: %s\n", buf);
	}

	p = xbc_node_find_value(node, "tracing_on", NULL);
	if (p && *p != '\0') {
		if (kstrtoul(p, 10, &v))
			pr_err("Failed to set tracing on: %s\n", p);
		if (v)
			tracer_tracing_on(tr);
		else
			tracer_tracing_off(tr);
	}

	p = xbc_node_find_value(node, "trace_clock", NULL);
	if (p && *p != '\0') {
		if (tracing_set_clock(tr, p) < 0)
			pr_err("Failed to set trace clock: %s\n", p);
	}

	p = xbc_node_find_value(node, "buffer_size", NULL);
	if (p && *p != '\0') {
		v = memparse(p, NULL);
		if (v < PAGE_SIZE)
			pr_err("Buffer size is too small: %s\n", p);
		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
			pr_err("Failed to resize trace buffer to %s\n", p);
	}

	p = xbc_node_find_value(node, "cpumask", NULL);
	if (p && *p != '\0') {
		cpumask_var_t new_mask;

		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
			if (cpumask_parse(p, new_mask) < 0 ||
			    tracing_set_cpumask(tr, new_mask) < 0)
				pr_err("Failed to set new CPU mask %s\n", p);
			free_cpumask_var(new_mask);
		}
	}
}

#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;

	xbc_node_for_each_array_value(node, "events", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (ftrace_set_clr_event(tr, buf, 1) < 0)
			pr_err("Failed to enable event: %s\n", p);
	}
}

#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
		if (ret) {
			pr_err("Failed to generate probe: %s\n", buf);
			break;
		}

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret) {
			pr_err("Failed to add probe: %s\n", buf);
			break;
		}
	}

	return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	pr_err("Kprobe event is not supported.\n");
	return -ENOTSUPP;
}
#endif
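
/*
 * Illustrative only: a bootconfig fragment that the "probes" parsing
 * above would consume. The group name is fixed to "kprobes" (see
 * trace_boot_init_one_event() below); the event name, probed symbol
 * and fetch arguments here are placeholders, not required names.
 *
 *	ftrace.event.kprobes.myevent {
 *		probes = "vfs_read $arg1 $arg2"
 *	}
 */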

#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;
	int ret;

	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
	if (ret)
		return ret;

	xbc_node_for_each_array_value(node, "fields", anode, p) {
		ret = synth_event_add_field_str(&cmd, p);
		if (ret)
			return ret;
	}

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif

static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	xbc_node_for_each_array_value(enode, "actions", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("action string is too long: %s\n", p);
		else if (trigger_process_regex(file, buf) < 0)
			pr_err("Failed to apply an action: %s\n", buf);
	}

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
			       group, event);
	}
out:
	mutex_unlock(&event_mutex);
}

static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;
	bool enable, enable_all = false;
	const char *data;

	node = xbc_node_find_child(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_child(node, gnode) {
		data = xbc_node_get_data(gnode);
		if (!strcmp(data, "enable")) {
			enable_all = true;
			continue;
		}
		enable = false;
		xbc_node_for_each_child(gnode, enode) {
			data = xbc_node_get_data(enode);
			if (!strcmp(data, "enable")) {
				enable = true;
				continue;
			}
			trace_boot_init_one_event(tr, gnode, enode);
		}
		/* Event enablement must be done after event settings */
		if (enable) {
			data = xbc_node_get_data(gnode);
			trace_array_set_clr_event(tr, data, NULL, true);
		}
	}
	/* Ditto */
	if (enable_all)
		trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif
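
/*
 * Illustrative only: per-event settings handled by
 * trace_boot_init_one_event() above. "filter", "actions" and "enable"
 * are the keys this file looks up; the group/event pair and the filter
 * expression are placeholders.
 *
 *	ftrace.event.task.task_newtask {
 *		filter = "pid < 128"
 *		enable
 *	}
 *
 * A synthetic event is defined the same way, with "fields" consumed by
 * trace_boot_add_synth_event() and a histogram trigger as an action:
 *
 *	ftrace.event.synthetic.initcall_latency {
 *		fields = "unsigned long func", "u64 lat"
 *		actions = "hist:keys=func.sym,lat:vals=lat:sort=lat"
 *	}
 */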

#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace notrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif

static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
	const char *p;

	trace_boot_set_ftrace_filter(tr, node);

	p = xbc_node_find_value(node, "tracer", NULL);
	if (p && *p != '\0') {
		if (tracing_set_tracer(tr, p) < 0)
			pr_err("Failed to set given tracer: %s\n", p);
	}

	/* Since tracer can free snapshot buffer, allocate snapshot here. */
	if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
		if (tracing_alloc_snapshot_instance(tr) < 0)
			pr_err("Failed to allocate snapshot buffer\n");
	}
}

static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}

static void __init
trace_boot_init_instances(struct xbc_node *node)
{
	struct xbc_node *inode;
	struct trace_array *tr;
	const char *p;

	node = xbc_node_find_child(node, "instance");
	if (!node)
		return;

	xbc_node_for_each_child(node, inode) {
		p = xbc_node_get_data(inode);
		if (!p || *p == '\0')
			continue;

		tr = trace_array_get_by_name(p);
		if (!tr) {
			pr_err("Failed to get trace instance %s\n", p);
			continue;
		}
		trace_boot_init_one_instance(tr, inode);
		trace_array_put(tr);
	}
}

static int __init trace_boot_init(void)
{
	struct xbc_node *trace_node;
	struct trace_array *tr;

	trace_node = xbc_find_node("ftrace");
	if (!trace_node)
		return 0;

	tr = top_trace_array();
	if (!tr)
		return 0;

	/* Global trace array is also one instance */
	trace_boot_init_one_instance(tr, trace_node);
	trace_boot_init_instances(trace_node);

	disable_tracing_selftest("running boot-time tracing");

	return 0;
}
/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);
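
/*
 * Illustrative only: how the pieces above fit together. The top-level
 * "ftrace" tree configures the global trace array, and each
 * "ftrace.instance.NAME" subtree is handed to
 * trace_boot_init_one_instance() as a separate trace array. The
 * instance name, tracer and filter pattern below are placeholders.
 *
 *	ftrace.instance.foo {
 *		tracer = function
 *		ftrace.filters = "user_*"
 *		tracing_on = 1
 *	}
 */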