/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_data {
	pid_t		last_pid;
	int		depth;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
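
/*
 * A simplified, illustrative sketch of how per-architecture entry code
 * drives the push/pop pair above; the real implementations (e.g. x86's
 * prepare_ftrace_return()) differ in detail, so treat this as an
 * assumption-laden outline rather than the actual arch code:
 *
 *	void prepare_ftrace_return(unsigned long *parent,
 *				   unsigned long self_addr)
 *	{
 *		struct ftrace_graph_ent trace;
 *		unsigned long old = *parent;
 *
 *		// Hijack the return address to the return trampoline
 *		*parent = (unsigned long)&return_to_handler;
 *
 *		if (ftrace_push_return_trace(old, self_addr,
 *					     &trace.depth) == -EBUSY) {
 *			*parent = old;	// stack full: restore and bail
 *			return;
 *		}
 *
 *		trace.func = self_addr;
 *		if (!ftrace_graph_entry(&trace)) {	// entry callback veto
 *			current->curr_ret_stack--;
 *			*parent = old;
 *		}
 *	}
 *
 * return_to_handler then calls ftrace_return_to_handler() above to
 * recover the original return address.
 */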

static int graph_trace_init(struct trace_array *tr)
{
	int ret = register_ftrace_graph(&trace_graph_return,
					&trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/* Despite its name, this returns the number of decimal digits in @nb */
static inline int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;
	if (nb / 10)
		return 2;
	return 1;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128-cpu system it would
	 * take up to 3 digits:
	 */
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Truncate the comm so "comm-pid" fits in the column */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
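
/*
 * Centering example for print_graph_proc(): pid 1755 with comm "sshd"
 * gives "sshd-1755" (9 characters), leaving 5 spaces in the 14-wide
 * column - 2 before and 3 after: "  sshd-1755   ".
 */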

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	/* First peek to compare current entry and the next one */
	if (ring_iter) {
		event = ring_buffer_iter_peek(ring_iter, NULL);
	} else {
		/* We need to consume the current entry to see the next one */
		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
					 NULL);
	}

	if (!event)
		return NULL;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* This is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

/* Signal an excessive duration of a function's execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non-nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);	/* nsecs remainder... */
	usecs_rem /= 1000;			/* ...scaled down to usecs */

	return trace_seq_printf(s, "%5lu.%06lu | ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

/*
 * Print the duration column. Worked example: duration = 3277 nsecs.
 * do_div() leaves the usecs part (3) in @duration and the nsecs
 * remainder (277) in nsecs_rem, so the column reads "3.277 us    | ",
 * with the numeric part padded out to 7 characters.
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", usecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "| ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		int cpu = iter->cpu;
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		*depth = call->depth - 1;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* A leaf entry and its return are collapsed into one line */
	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		int *depth = &(per_cpu_ptr(data, iter->cpu)->depth);

		*depth = call->depth;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* A nested entry opens a bracket that its return will close */
	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter)
{
	int cpu = iter->cpu;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		return print_graph_entry_leaf(iter, field, leaf_ret, s);
	else
		return print_graph_entry_nested(iter, field, s, cpu);
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int ret;
	int i;

	if (data) {
		int *depth = &(per_cpu_ptr(data, cpu)->depth);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		*depth = trace->depth - 1;
	}

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->trace(iter, sym_flags);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter);
	}
	default:
		return print_graph_comment(s, entry, iter);
	}

	return TRACE_TYPE_HANDLED;
}

static void print_graph_headers(struct seq_file *s)
{
	/* 1st line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "CPU");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID      ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "# ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, "|  ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |       ");
	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

static void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
	int cpu;

	if (!data)
		pr_warning("function graph tracer: not enough memory\n");
	else
		for_each_possible_cpu(cpu) {
			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
			int *depth = &(per_cpu_ptr(data, cpu)->depth);
			*pid = -1;
			*depth = 0;
		}

	iter->private = data;
}

static void graph_trace_close(struct trace_iterator *iter)
{
	free_percpu(iter->private);
}

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.close		= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
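
/*
 * Usage sketch (assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * which produces output along these lines (function names, durations
 * and exact spacing are illustrative only):
 *
 *	1)               |  schedule() {
 *	1)   0.532 us    |    kfree();
 *	1) + 12.345 us   |  }
 *
 * "+" flags a duration above 10 usecs and "!" one above 100 usecs
 * (see print_graph_overhead()). The options in trace_opts can be
 * toggled at runtime through trace_options, with a "no" prefix to
 * disable:
 *
 *	# echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *	# echo nofuncgraph-overhead > /sys/kernel/debug/tracing/trace_options
 */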