1 /* 2 * 3 * Function graph tracer. 4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> 5 * Mostly borrowed from function tracer which 6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com> 7 * 8 */ 9 #include <linux/debugfs.h> 10 #include <linux/uaccess.h> 11 #include <linux/ftrace.h> 12 #include <linux/fs.h> 13 14 #include "trace.h" 15 #include "trace_output.h" 16 17 struct fgraph_data { 18 pid_t last_pid; 19 int depth; 20 }; 21 22 #define TRACE_GRAPH_INDENT 2 23 24 /* Flag options */ 25 #define TRACE_GRAPH_PRINT_OVERRUN 0x1 26 #define TRACE_GRAPH_PRINT_CPU 0x2 27 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 28 #define TRACE_GRAPH_PRINT_PROC 0x8 29 #define TRACE_GRAPH_PRINT_DURATION 0x10 30 #define TRACE_GRAPH_PRINT_ABS_TIME 0X20 31 32 static struct tracer_opt trace_opts[] = { 33 /* Display overruns? (for self-debug purpose) */ 34 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, 35 /* Display CPU ? */ 36 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, 37 /* Display Overhead ? */ 38 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, 39 /* Display proc name/pid */ 40 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, 41 /* Display duration of execution */ 42 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, 43 /* Display absolute time of an entry */ 44 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, 45 { } /* Empty entry */ 46 }; 47 48 static struct tracer_flags tracer_flags = { 49 /* Don't display overruns and proc by default */ 50 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | 51 TRACE_GRAPH_PRINT_DURATION, 52 .opts = trace_opts 53 }; 54 55 /* pid on the last trace processed */ 56 57 58 /* Add a function return address to the trace stack on thread info.*/ 59 int 60 ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) 61 { 62 unsigned long long calltime; 63 int index; 64 65 if (!current->ret_stack) 66 return -EBUSY; 67 68 /* 69 * We must make sure the ret_stack is tested before we read 70 * anything else. 71 */ 72 smp_rmb(); 73 74 /* The return trace stack is full */ 75 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { 76 atomic_inc(¤t->trace_overrun); 77 return -EBUSY; 78 } 79 80 calltime = trace_clock_local(); 81 82 index = ++current->curr_ret_stack; 83 barrier(); 84 current->ret_stack[index].ret = ret; 85 current->ret_stack[index].func = func; 86 current->ret_stack[index].calltime = calltime; 87 current->ret_stack[index].subtime = 0; 88 *depth = index; 89 90 return 0; 91 } 92 93 /* Retrieve a function return address to the trace stack on thread info.*/ 94 static void 95 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) 96 { 97 int index; 98 99 index = current->curr_ret_stack; 100 101 if (unlikely(index < 0)) { 102 ftrace_graph_stop(); 103 WARN_ON(1); 104 /* Might as well panic, otherwise we have no where to go */ 105 *ret = (unsigned long)panic; 106 return; 107 } 108 109 *ret = current->ret_stack[index].ret; 110 trace->func = current->ret_stack[index].func; 111 trace->calltime = current->ret_stack[index].calltime; 112 trace->overrun = atomic_read(¤t->trace_overrun); 113 trace->depth = index; 114 } 115 116 /* 117 * Send the trace to the ring-buffer. 118 * @return the original return address. 119 */ 120 unsigned long ftrace_return_to_handler(void) 121 { 122 struct ftrace_graph_ret trace; 123 unsigned long ret; 124 125 ftrace_pop_return_trace(&trace, &ret); 126 trace.rettime = trace_clock_local(); 127 ftrace_graph_return(&trace); 128 barrier(); 129 current->curr_ret_stack--; 130 131 if (unlikely(!ret)) { 132 ftrace_graph_stop(); 133 WARN_ON(1); 134 /* Might as well panic. What else to do? */ 135 ret = (unsigned long)panic; 136 } 137 138 return ret; 139 } 140 141 static int graph_trace_init(struct trace_array *tr) 142 { 143 int ret = register_ftrace_graph(&trace_graph_return, 144 &trace_graph_entry); 145 if (ret) 146 return ret; 147 tracing_start_cmdline_record(); 148 149 return 0; 150 } 151 152 static void graph_trace_reset(struct trace_array *tr) 153 { 154 tracing_stop_cmdline_record(); 155 unregister_ftrace_graph(); 156 } 157 158 static inline int log10_cpu(int nb) 159 { 160 if (nb / 100) 161 return 3; 162 if (nb / 10) 163 return 2; 164 return 1; 165 } 166 167 static enum print_line_t 168 print_graph_cpu(struct trace_seq *s, int cpu) 169 { 170 int i; 171 int ret; 172 int log10_this = log10_cpu(cpu); 173 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask)); 174 175 176 /* 177 * Start with a space character - to make it stand out 178 * to the right a bit when trace output is pasted into 179 * email: 180 */ 181 ret = trace_seq_printf(s, " "); 182 183 /* 184 * Tricky - we space the CPU field according to the max 185 * number of online CPUs. On a 2-cpu system it would take 186 * a maximum of 1 digit - on a 128 cpu system it would 187 * take up to 3 digits: 188 */ 189 for (i = 0; i < log10_all - log10_this; i++) { 190 ret = trace_seq_printf(s, " "); 191 if (!ret) 192 return TRACE_TYPE_PARTIAL_LINE; 193 } 194 ret = trace_seq_printf(s, "%d) ", cpu); 195 if (!ret) 196 return TRACE_TYPE_PARTIAL_LINE; 197 198 return TRACE_TYPE_HANDLED; 199 } 200 201 #define TRACE_GRAPH_PROCINFO_LENGTH 14 202 203 static enum print_line_t 204 print_graph_proc(struct trace_seq *s, pid_t pid) 205 { 206 char comm[TASK_COMM_LEN]; 207 /* sign + log10(MAX_INT) + '\0' */ 208 char pid_str[11]; 209 int spaces = 0; 210 int ret; 211 int len; 212 int i; 213 214 trace_find_cmdline(pid, comm); 215 comm[7] = '\0'; 216 sprintf(pid_str, "%d", pid); 217 218 /* 1 stands for the "-" character */ 219 len = strlen(comm) + strlen(pid_str) + 1; 220 221 if (len < TRACE_GRAPH_PROCINFO_LENGTH) 222 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; 223 224 /* First spaces to align center */ 225 for (i = 0; i < spaces / 2; i++) { 226 ret = trace_seq_printf(s, " "); 227 if (!ret) 228 return TRACE_TYPE_PARTIAL_LINE; 229 } 230 231 ret = trace_seq_printf(s, "%s-%s", comm, pid_str); 232 if (!ret) 233 return TRACE_TYPE_PARTIAL_LINE; 234 235 /* Last spaces to align center */ 236 for (i = 0; i < spaces - (spaces / 2); i++) { 237 ret = trace_seq_printf(s, " "); 238 if (!ret) 239 return TRACE_TYPE_PARTIAL_LINE; 240 } 241 return TRACE_TYPE_HANDLED; 242 } 243 244 245 /* If the pid changed since the last trace, output this event */ 246 static enum print_line_t 247 verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) 248 { 249 pid_t prev_pid; 250 pid_t *last_pid; 251 int ret; 252 253 if (!data) 254 return TRACE_TYPE_HANDLED; 255 256 last_pid = &(per_cpu_ptr(data, cpu)->last_pid); 257 258 if (*last_pid == pid) 259 return TRACE_TYPE_HANDLED; 260 261 prev_pid = *last_pid; 262 *last_pid = pid; 263 264 if (prev_pid == -1) 265 return TRACE_TYPE_HANDLED; 266 /* 267 * Context-switch trace line: 268 269 ------------------------------------------ 270 | 1) migration/0--1 => sshd-1755 271 ------------------------------------------ 272 273 */ 274 ret = trace_seq_printf(s, 275 " ------------------------------------------\n"); 276 if (!ret) 277 return TRACE_TYPE_PARTIAL_LINE; 278 279 ret = print_graph_cpu(s, cpu); 280 if (ret == TRACE_TYPE_PARTIAL_LINE) 281 return TRACE_TYPE_PARTIAL_LINE; 282 283 ret = print_graph_proc(s, prev_pid); 284 if (ret == TRACE_TYPE_PARTIAL_LINE) 285 return TRACE_TYPE_PARTIAL_LINE; 286 287 ret = trace_seq_printf(s, " => "); 288 if (!ret) 289 return TRACE_TYPE_PARTIAL_LINE; 290 291 ret = print_graph_proc(s, pid); 292 if (ret == TRACE_TYPE_PARTIAL_LINE) 293 return TRACE_TYPE_PARTIAL_LINE; 294 295 ret = trace_seq_printf(s, 296 "\n ------------------------------------------\n\n"); 297 if (!ret) 298 return TRACE_TYPE_PARTIAL_LINE; 299 300 return TRACE_TYPE_HANDLED; 301 } 302 303 static struct ftrace_graph_ret_entry * 304 get_return_for_leaf(struct trace_iterator *iter, 305 struct ftrace_graph_ent_entry *curr) 306 { 307 struct ring_buffer_iter *ring_iter; 308 struct ring_buffer_event *event; 309 struct ftrace_graph_ret_entry *next; 310 311 ring_iter = iter->buffer_iter[iter->cpu]; 312 313 /* First peek to compare current entry and the next one */ 314 if (ring_iter) 315 event = ring_buffer_iter_peek(ring_iter, NULL); 316 else { 317 /* We need to consume the current entry to see the next one */ 318 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); 319 event = ring_buffer_peek(iter->tr->buffer, iter->cpu, 320 NULL); 321 } 322 323 if (!event) 324 return NULL; 325 326 next = ring_buffer_event_data(event); 327 328 if (next->ent.type != TRACE_GRAPH_RET) 329 return NULL; 330 331 if (curr->ent.pid != next->ent.pid || 332 curr->graph_ent.func != next->ret.func) 333 return NULL; 334 335 /* this is a leaf, now advance the iterator */ 336 if (ring_iter) 337 ring_buffer_read(ring_iter, NULL); 338 339 return next; 340 } 341 342 /* Signal a overhead of time execution to the output */ 343 static int 344 print_graph_overhead(unsigned long long duration, struct trace_seq *s) 345 { 346 /* If duration disappear, we don't need anything */ 347 if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) 348 return 1; 349 350 /* Non nested entry or return */ 351 if (duration == -1) 352 return trace_seq_printf(s, " "); 353 354 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { 355 /* Duration exceeded 100 msecs */ 356 if (duration > 100000ULL) 357 return trace_seq_printf(s, "! "); 358 359 /* Duration exceeded 10 msecs */ 360 if (duration > 10000ULL) 361 return trace_seq_printf(s, "+ "); 362 } 363 364 return trace_seq_printf(s, " "); 365 } 366 367 static int print_graph_abs_time(u64 t, struct trace_seq *s) 368 { 369 unsigned long usecs_rem; 370 371 usecs_rem = do_div(t, NSEC_PER_SEC); 372 usecs_rem /= 1000; 373 374 return trace_seq_printf(s, "%5lu.%06lu | ", 375 (unsigned long)t, usecs_rem); 376 } 377 378 static enum print_line_t 379 print_graph_irq(struct trace_iterator *iter, unsigned long addr, 380 enum trace_type type, int cpu, pid_t pid) 381 { 382 int ret; 383 struct trace_seq *s = &iter->seq; 384 385 if (addr < (unsigned long)__irqentry_text_start || 386 addr >= (unsigned long)__irqentry_text_end) 387 return TRACE_TYPE_UNHANDLED; 388 389 /* Absolute time */ 390 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { 391 ret = print_graph_abs_time(iter->ts, s); 392 if (!ret) 393 return TRACE_TYPE_PARTIAL_LINE; 394 } 395 396 /* Cpu */ 397 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 398 ret = print_graph_cpu(s, cpu); 399 if (ret == TRACE_TYPE_PARTIAL_LINE) 400 return TRACE_TYPE_PARTIAL_LINE; 401 } 402 /* Proc */ 403 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 404 ret = print_graph_proc(s, pid); 405 if (ret == TRACE_TYPE_PARTIAL_LINE) 406 return TRACE_TYPE_PARTIAL_LINE; 407 ret = trace_seq_printf(s, " | "); 408 if (!ret) 409 return TRACE_TYPE_PARTIAL_LINE; 410 } 411 412 /* No overhead */ 413 ret = print_graph_overhead(-1, s); 414 if (!ret) 415 return TRACE_TYPE_PARTIAL_LINE; 416 417 if (type == TRACE_GRAPH_ENT) 418 ret = trace_seq_printf(s, "==========>"); 419 else 420 ret = trace_seq_printf(s, "<=========="); 421 422 if (!ret) 423 return TRACE_TYPE_PARTIAL_LINE; 424 425 /* Don't close the duration column if haven't one */ 426 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 427 trace_seq_printf(s, " |"); 428 ret = trace_seq_printf(s, "\n"); 429 430 if (!ret) 431 return TRACE_TYPE_PARTIAL_LINE; 432 return TRACE_TYPE_HANDLED; 433 } 434 435 enum print_line_t 436 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) 437 { 438 unsigned long nsecs_rem = do_div(duration, 1000); 439 /* log10(ULONG_MAX) + '\0' */ 440 char msecs_str[21]; 441 char nsecs_str[5]; 442 int ret, len; 443 int i; 444 445 sprintf(msecs_str, "%lu", (unsigned long) duration); 446 447 /* Print msecs */ 448 ret = trace_seq_printf(s, "%s", msecs_str); 449 if (!ret) 450 return TRACE_TYPE_PARTIAL_LINE; 451 452 len = strlen(msecs_str); 453 454 /* Print nsecs (we don't want to exceed 7 numbers) */ 455 if (len < 7) { 456 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); 457 ret = trace_seq_printf(s, ".%s", nsecs_str); 458 if (!ret) 459 return TRACE_TYPE_PARTIAL_LINE; 460 len += strlen(nsecs_str); 461 } 462 463 ret = trace_seq_printf(s, " us "); 464 if (!ret) 465 return TRACE_TYPE_PARTIAL_LINE; 466 467 /* Print remaining spaces to fit the row's width */ 468 for (i = len; i < 7; i++) { 469 ret = trace_seq_printf(s, " "); 470 if (!ret) 471 return TRACE_TYPE_PARTIAL_LINE; 472 } 473 return TRACE_TYPE_HANDLED; 474 } 475 476 static enum print_line_t 477 print_graph_duration(unsigned long long duration, struct trace_seq *s) 478 { 479 int ret; 480 481 ret = trace_print_graph_duration(duration, s); 482 if (ret != TRACE_TYPE_HANDLED) 483 return ret; 484 485 ret = trace_seq_printf(s, "| "); 486 if (!ret) 487 return TRACE_TYPE_PARTIAL_LINE; 488 489 return TRACE_TYPE_HANDLED; 490 } 491 492 /* Case of a leaf function on its call entry */ 493 static enum print_line_t 494 print_graph_entry_leaf(struct trace_iterator *iter, 495 struct ftrace_graph_ent_entry *entry, 496 struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) 497 { 498 struct fgraph_data *data = iter->private; 499 struct ftrace_graph_ret *graph_ret; 500 struct ftrace_graph_ent *call; 501 unsigned long long duration; 502 int ret; 503 int i; 504 505 graph_ret = &ret_entry->ret; 506 call = &entry->graph_ent; 507 duration = graph_ret->rettime - graph_ret->calltime; 508 509 if (data) { 510 int cpu = iter->cpu; 511 int *depth = &(per_cpu_ptr(data, cpu)->depth); 512 513 /* 514 * Comments display at + 1 to depth. Since 515 * this is a leaf function, keep the comments 516 * equal to this depth. 517 */ 518 *depth = call->depth - 1; 519 } 520 521 /* Overhead */ 522 ret = print_graph_overhead(duration, s); 523 if (!ret) 524 return TRACE_TYPE_PARTIAL_LINE; 525 526 /* Duration */ 527 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 528 ret = print_graph_duration(duration, s); 529 if (ret == TRACE_TYPE_PARTIAL_LINE) 530 return TRACE_TYPE_PARTIAL_LINE; 531 } 532 533 /* Function */ 534 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 535 ret = trace_seq_printf(s, " "); 536 if (!ret) 537 return TRACE_TYPE_PARTIAL_LINE; 538 } 539 540 ret = seq_print_ip_sym(s, call->func, 0); 541 if (!ret) 542 return TRACE_TYPE_PARTIAL_LINE; 543 544 ret = trace_seq_printf(s, "();\n"); 545 if (!ret) 546 return TRACE_TYPE_PARTIAL_LINE; 547 548 return TRACE_TYPE_HANDLED; 549 } 550 551 static enum print_line_t 552 print_graph_entry_nested(struct trace_iterator *iter, 553 struct ftrace_graph_ent_entry *entry, 554 struct trace_seq *s, int cpu) 555 { 556 struct ftrace_graph_ent *call = &entry->graph_ent; 557 struct fgraph_data *data = iter->private; 558 int ret; 559 int i; 560 561 if (data) { 562 int cpu = iter->cpu; 563 int *depth = &(per_cpu_ptr(data, cpu)->depth); 564 565 *depth = call->depth; 566 } 567 568 /* No overhead */ 569 ret = print_graph_overhead(-1, s); 570 if (!ret) 571 return TRACE_TYPE_PARTIAL_LINE; 572 573 /* No time */ 574 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 575 ret = trace_seq_printf(s, " | "); 576 if (!ret) 577 return TRACE_TYPE_PARTIAL_LINE; 578 } 579 580 /* Function */ 581 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { 582 ret = trace_seq_printf(s, " "); 583 if (!ret) 584 return TRACE_TYPE_PARTIAL_LINE; 585 } 586 587 ret = seq_print_ip_sym(s, call->func, 0); 588 if (!ret) 589 return TRACE_TYPE_PARTIAL_LINE; 590 591 ret = trace_seq_printf(s, "() {\n"); 592 if (!ret) 593 return TRACE_TYPE_PARTIAL_LINE; 594 595 /* 596 * we already consumed the current entry to check the next one 597 * and see if this is a leaf. 598 */ 599 return TRACE_TYPE_NO_CONSUME; 600 } 601 602 static enum print_line_t 603 print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, 604 int type, unsigned long addr) 605 { 606 struct fgraph_data *data = iter->private; 607 struct trace_entry *ent = iter->ent; 608 int cpu = iter->cpu; 609 int ret; 610 611 /* Pid */ 612 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) 613 return TRACE_TYPE_PARTIAL_LINE; 614 615 if (type) { 616 /* Interrupt */ 617 ret = print_graph_irq(iter, addr, type, cpu, ent->pid); 618 if (ret == TRACE_TYPE_PARTIAL_LINE) 619 return TRACE_TYPE_PARTIAL_LINE; 620 } 621 622 /* Absolute time */ 623 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { 624 ret = print_graph_abs_time(iter->ts, s); 625 if (!ret) 626 return TRACE_TYPE_PARTIAL_LINE; 627 } 628 629 /* Cpu */ 630 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { 631 ret = print_graph_cpu(s, cpu); 632 if (ret == TRACE_TYPE_PARTIAL_LINE) 633 return TRACE_TYPE_PARTIAL_LINE; 634 } 635 636 /* Proc */ 637 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { 638 ret = print_graph_proc(s, ent->pid); 639 if (ret == TRACE_TYPE_PARTIAL_LINE) 640 return TRACE_TYPE_PARTIAL_LINE; 641 642 ret = trace_seq_printf(s, " | "); 643 if (!ret) 644 return TRACE_TYPE_PARTIAL_LINE; 645 } 646 647 return 0; 648 } 649 650 static enum print_line_t 651 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, 652 struct trace_iterator *iter) 653 { 654 int cpu = iter->cpu; 655 struct ftrace_graph_ent *call = &field->graph_ent; 656 struct ftrace_graph_ret_entry *leaf_ret; 657 658 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) 659 return TRACE_TYPE_PARTIAL_LINE; 660 661 leaf_ret = get_return_for_leaf(iter, field); 662 if (leaf_ret) 663 return print_graph_entry_leaf(iter, field, leaf_ret, s); 664 else 665 return print_graph_entry_nested(iter, field, s, cpu); 666 667 } 668 669 static enum print_line_t 670 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, 671 struct trace_entry *ent, struct trace_iterator *iter) 672 { 673 unsigned long long duration = trace->rettime - trace->calltime; 674 struct fgraph_data *data = iter->private; 675 pid_t pid = ent->pid; 676 int cpu = iter->cpu; 677 int ret; 678 int i; 679 680 if (data) { 681 int cpu = iter->cpu; 682 int *depth = &(per_cpu_ptr(data, cpu)->depth); 683 684 /* 685 * Comments display at + 1 to depth. This is the 686 * return from a function, we now want the comments 687 * to display at the same level of the bracket. 688 */ 689 *depth = trace->depth - 1; 690 } 691 692 if (print_graph_prologue(iter, s, 0, 0)) 693 return TRACE_TYPE_PARTIAL_LINE; 694 695 /* Overhead */ 696 ret = print_graph_overhead(duration, s); 697 if (!ret) 698 return TRACE_TYPE_PARTIAL_LINE; 699 700 /* Duration */ 701 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 702 ret = print_graph_duration(duration, s); 703 if (ret == TRACE_TYPE_PARTIAL_LINE) 704 return TRACE_TYPE_PARTIAL_LINE; 705 } 706 707 /* Closing brace */ 708 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { 709 ret = trace_seq_printf(s, " "); 710 if (!ret) 711 return TRACE_TYPE_PARTIAL_LINE; 712 } 713 714 ret = trace_seq_printf(s, "}\n"); 715 if (!ret) 716 return TRACE_TYPE_PARTIAL_LINE; 717 718 /* Overrun */ 719 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { 720 ret = trace_seq_printf(s, " (Overruns: %lu)\n", 721 trace->overrun); 722 if (!ret) 723 return TRACE_TYPE_PARTIAL_LINE; 724 } 725 726 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); 727 if (ret == TRACE_TYPE_PARTIAL_LINE) 728 return TRACE_TYPE_PARTIAL_LINE; 729 730 return TRACE_TYPE_HANDLED; 731 } 732 733 static enum print_line_t 734 print_graph_comment(struct trace_seq *s, struct trace_entry *ent, 735 struct trace_iterator *iter) 736 { 737 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 738 struct fgraph_data *data = iter->private; 739 struct trace_event *event; 740 int depth = 0; 741 int ret; 742 int i; 743 744 if (data) 745 depth = per_cpu_ptr(data, iter->cpu)->depth; 746 747 if (print_graph_prologue(iter, s, 0, 0)) 748 return TRACE_TYPE_PARTIAL_LINE; 749 750 /* No overhead */ 751 ret = print_graph_overhead(-1, s); 752 if (!ret) 753 return TRACE_TYPE_PARTIAL_LINE; 754 755 /* No time */ 756 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { 757 ret = trace_seq_printf(s, " | "); 758 if (!ret) 759 return TRACE_TYPE_PARTIAL_LINE; 760 } 761 762 /* Indentation */ 763 if (depth > 0) 764 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { 765 ret = trace_seq_printf(s, " "); 766 if (!ret) 767 return TRACE_TYPE_PARTIAL_LINE; 768 } 769 770 /* The comment */ 771 ret = trace_seq_printf(s, "/* "); 772 if (!ret) 773 return TRACE_TYPE_PARTIAL_LINE; 774 775 switch (iter->ent->type) { 776 case TRACE_BPRINT: 777 ret = trace_print_bprintk_msg_only(iter); 778 if (ret != TRACE_TYPE_HANDLED) 779 return ret; 780 break; 781 case TRACE_PRINT: 782 ret = trace_print_printk_msg_only(iter); 783 if (ret != TRACE_TYPE_HANDLED) 784 return ret; 785 break; 786 default: 787 event = ftrace_find_event(ent->type); 788 if (!event) 789 return TRACE_TYPE_UNHANDLED; 790 791 ret = event->trace(iter, sym_flags); 792 if (ret != TRACE_TYPE_HANDLED) 793 return ret; 794 } 795 796 /* Strip ending newline */ 797 if (s->buffer[s->len - 1] == '\n') { 798 s->buffer[s->len - 1] = '\0'; 799 s->len--; 800 } 801 802 ret = trace_seq_printf(s, " */\n"); 803 if (!ret) 804 return TRACE_TYPE_PARTIAL_LINE; 805 806 return TRACE_TYPE_HANDLED; 807 } 808 809 810 enum print_line_t 811 print_graph_function(struct trace_iterator *iter) 812 { 813 struct trace_entry *entry = iter->ent; 814 struct trace_seq *s = &iter->seq; 815 816 switch (entry->type) { 817 case TRACE_GRAPH_ENT: { 818 struct ftrace_graph_ent_entry *field; 819 trace_assign_type(field, entry); 820 return print_graph_entry(field, s, iter); 821 } 822 case TRACE_GRAPH_RET: { 823 struct ftrace_graph_ret_entry *field; 824 trace_assign_type(field, entry); 825 return print_graph_return(&field->ret, s, entry, iter); 826 } 827 default: 828 return print_graph_comment(s, entry, iter); 829 } 830 831 return TRACE_TYPE_HANDLED; 832 } 833 834 static void print_graph_headers(struct seq_file *s) 835 { 836 /* 1st line */ 837 seq_printf(s, "# "); 838 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 839 seq_printf(s, " TIME "); 840 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 841 seq_printf(s, "CPU"); 842 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 843 seq_printf(s, " TASK/PID "); 844 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 845 seq_printf(s, " DURATION "); 846 seq_printf(s, " FUNCTION CALLS\n"); 847 848 /* 2nd line */ 849 seq_printf(s, "# "); 850 if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) 851 seq_printf(s, " | "); 852 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) 853 seq_printf(s, "| "); 854 if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) 855 seq_printf(s, " | | "); 856 if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) 857 seq_printf(s, " | | "); 858 seq_printf(s, " | | | |\n"); 859 } 860 861 static void graph_trace_open(struct trace_iterator *iter) 862 { 863 /* pid and depth on the last trace processed */ 864 struct fgraph_data *data = alloc_percpu(struct fgraph_data); 865 int cpu; 866 867 if (!data) 868 pr_warning("function graph tracer: not enough memory\n"); 869 else 870 for_each_possible_cpu(cpu) { 871 pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid); 872 int *depth = &(per_cpu_ptr(data, cpu)->depth); 873 *pid = -1; 874 *depth = 0; 875 } 876 877 iter->private = data; 878 } 879 880 static void graph_trace_close(struct trace_iterator *iter) 881 { 882 free_percpu(iter->private); 883 } 884 885 static struct tracer graph_trace __read_mostly = { 886 .name = "function_graph", 887 .open = graph_trace_open, 888 .close = graph_trace_close, 889 .wait_pipe = poll_wait_pipe, 890 .init = graph_trace_init, 891 .reset = graph_trace_reset, 892 .print_line = print_graph_function, 893 .print_header = print_graph_headers, 894 .flags = &tracer_flags, 895 #ifdef CONFIG_FTRACE_SELFTEST 896 .selftest = trace_selftest_startup_function_graph, 897 #endif 898 }; 899 900 static __init int init_graph_trace(void) 901 { 902 return register_tracer(&graph_trace); 903 } 904 905 device_initcall(init_graph_trace); 906