// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ markers; the
 * following values are used by print_graph_irq() and others to
 * fill in space in that column.
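 *
 * FLAGS_FILL_FULL blanks the whole column; FLAGS_FILL_START and
 * FLAGS_FILL_END pad only the space left open on either side of the
 * "==========>" / "<==========" markers that print_graph_irq() emits.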
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
                return 0;

        if (ftrace_graph_notrace_addr(trace->func)) {
                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
                /*
                 * Need to return 1 to have the return called
                 * that will clear the NOTRACE bit.
                 */
                return 1;
        }

        if (!ftrace_trace_task(tr))
                return 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;

        if (ftrace_graph_ignore_irqs())
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_thresh is set.  We only write function return
         * events to the ring buffer.
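         * trace_graph_thresh_return() then decides, for each return
         * event, whether the function ran long enough to be recorded.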
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        ftrace_graph_addr_finish(trace);

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        ftrace_graph_addr_finish(trace);

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }

        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
        .entryfunc = &trace_graph_entry,
        .retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
        .entryfunc = &trace_graph_entry,
        .retfunc = &trace_graph_return,
};
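
/*
 * Note that tracing_thresh is sampled only once, in graph_trace_init()
 * below, to pick which ops to register; graph_trace_update_thresh()
 * therefore has to re-register the graph ops whenever the threshold
 * changes.
 */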
static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&funcgraph_thresh_ops);
        else
                ret = register_ftrace_graph(&funcgraph_ops);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        if (tracing_thresh)
                unregister_ftrace_graph(&funcgraph_thresh_ops);
        else
                unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
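                         * The consumed entry is not lost:
                         * print_graph_entry_nested() returns
                         * TRACE_TYPE_NO_CONSUME afterwards, so the
                         * iterator does not consume it a second time.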
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, we only
                         * need to record its type so that the leaf check
                         * below fails.  Otherwise we can safely copy the
                         * entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

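/*
 * Worked example for the formatting above: duration = 12345 ns prints
 * as "12.345 us"; very large durations lose their least significant
 * fractional digits so that at most seven digits are ever printed.
 */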
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of time execution to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int cpu = iter->cpu;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1.  Since this is a
                 * leaf function, keep the comments equal to this
                 * depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
                        cpu, iter->ent->pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

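/*
 * Illustrative output with the default options (spacing approximate):
 * a leaf call is printed on a single line,
 *
 *   1)   1.234 us   |    kfree();
 *
 * while a nested call opens a brace that the matching return event
 * closes later.
 */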
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set (irqs are displayed, not filtered)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set (irqs are displayed, not filtered)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth.  Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

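/*
 * Example with the funcgraph-irqs option turned off: an interrupt
 * entry hit at depth 3 stores depth_irq = 3, and every event is then
 * suppressed until a return at depth <= 3 clears depth_irq again.
 */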
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at depth + 1.  This is the
                 * return from a function, we now want the comments
                 * to display at the same level as the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost.  Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name.  Always do
         * that if the funcgraph-tail option is enabled.
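         * (func_match above is the result of comparing this return
         * against the address that was saved in enter_funcs[] when the
         * matching entry was printed.)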
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry, which would otherwise never
         * be written out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
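                 * (get_return_for_leaf() is what may consume the event,
                 * when it has to peek at the next one to detect a leaf.)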
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "  /* 16 spaces */
                "    "                                   /* 4 spaces */
                "                 ";                     /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "    |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

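/*
 * With the default flags the headers above come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */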
void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        fgraph_max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", fgraph_max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

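/*
 * The "max_graph_depth" file created below is driven from user space;
 * with tracefs typically mounted at /sys/kernel/tracing, e.g.:
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * limits tracing to three levels of nesting, and 0 means no limit.
 */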
static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
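
/*
 * Typical use from the tracefs mount point (usually
 * /sys/kernel/tracing):
 *
 *   echo function_graph > current_tracer
 *   cat trace
 *
 * The tracer_opts defined above appear as options/funcgraph-* files;
 * e.g. "echo 1 > options/funcgraph-proc" enables the TASK/PID column.
 */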