/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/* The top level array uses the "global_ops". */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
		ret = allocate_ftrace_ops(tr);
		if (ret)
			return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}
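/*
 * Illustrative example (not part of the original file): the filter
 * files created above also appear in each trace instance directory,
 * so every instance gets its own function filtering. Paths below
 * assume debugfs is mounted at /sys/kernel/debug:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 *	# echo 'sched*' > /sys/kernel/debug/tracing/instances/foo/set_ftrace_filter
 */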
static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated
		 * at instance creation, unless that allocation failed.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
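/*
 * Illustrative example (assumed tracefs layout, not part of the
 * original file): the "func_stack_trace" option defined above is
 * toggled at runtime and handled by func_set_flag() below:
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo function > current_tracer
 *	# echo 1 > options/func_stack_trace
 *
 * Setting it swaps tr->ops from trace_ops to trace_stack_ops, so each
 * traced function entry also records a stack trace.
 */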
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops = &trace_stack_ops;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops = &trace_ops;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
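/*
 * Illustrative example (not part of the original file): the tracer
 * defined above is selected through the tracing debugfs directory:
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *	# head /sys/kernel/debug/tracing/trace
 *
 * Because .allow_instances is true, the same tracer can also run
 * independently in each instances/<name>/ directory.
 */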
#ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
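/*
 * Illustrative examples (not part of the original file): the callback
 * above parses probes of the form "<glob>:<cmd>[:<count>]" written to
 * set_ftrace_filter, where a missing or empty count means "fire on
 * every hit" ((void *)-1), and a leading '!' removes the probe:
 *
 *	# echo 'schedule:traceoff' > set_ftrace_filter
 *	# echo 'vfs_read:stacktrace:3' > set_ftrace_filter
 *	# echo '!vfs_read:stacktrace' >> set_ftrace_filter
 */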
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);
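/*
 * Illustrative note (not part of the original file): once the initcall
 * above has run, the traceon, traceoff, stacktrace, dump and cpudump
 * commands are usable from set_ftrace_filter. "dump" and "cpudump" are
 * registered with a fixed count of "1", so the ring buffer is dumped
 * at most once no matter how often the probed function is hit, e.g.:
 *
 *	# echo 'oops_enter:dump' > /sys/kernel/debug/tracing/set_ftrace_filter
 */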