/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_sem);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bputs_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_puts(s, field->str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_puts(s, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the trace does not fit into the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);

/**
 * trace_seq_bitmask - put a list of longs as a bitmask print output
 * @s: trace sequence descriptor
 * @maskp: points to an array of unsigned longs that represent a bitmask
 * @nmaskbits: The number of bits that are valid in @maskp
 *
 * Returns 0 if the trace does not fit into the buffer's free
 * space, 1 otherwise.
 *
 * Writes an ASCII representation of a bitmask string into @s.
 */
int
trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
		  int nmaskbits)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_bitmask);

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, const struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif

const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
{
	const char *ret = p->buffer + p->len;

	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);

int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
{
	struct ftrace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;
	int ret;

	event = container_of(trace_event, struct ftrace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);

static int ftrace_output_raw(struct trace_iterator *iter, char *name,
			     char *fmt, va_list ap)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	ret = trace_seq_printf(s, "%s: ", name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_vprintf(s, fmt, ap);

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = ftrace_output_raw(iter, name, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_output_call);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_putc(s, '\n');
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_putc(s, '0');

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh_us = 100;

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		return trace_seq_printf(
				s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
				ns2usecs(iter->ts),
				abs_msec, abs_usec,
				rel_msec, rel_usec);
	} else if (verbose && !in_ns) {
		return trace_seq_printf(
				s, "[%016llx] %lld (+%lld): ",
				iter->ts, abs_ts, rel_ts);
	} else if (!verbose && in_ns) {
		return trace_seq_printf(
				s, " %4lldus%c: ",
				abs_ts,
				rel_ts > preempt_mark_thresh_us ? '!' :
				  rel_ts > 1 ? '+' : ' ');
	} else { /* !verbose && !in_ns */
		return trace_seq_printf(s, " %4lld: ", abs_ts);
	}
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		return trace_seq_printf(s, " %12llu: ", iter->ts);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(
				s, "%16s %5d %3d %d %08x %08lx ",
				comm, entry->pid, iter->cpu, entry->flags,
				entry->preempt_count, iter->idx);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
	}

	if (ret)
		ret = lat_print_timestamp(iter, next_ts);

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);

/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_sem);
	__unregister_ftrace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_puts(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}


static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_putc(s, '\n'))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
		  struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bputs_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_puts(s, field->str))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
		struct trace_event *event)
{
	struct bputs_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_puts(s, field->str))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bputs_funcs = {
	.trace		= trace_bputs_print,
	.raw		= trace_bputs_raw,
};

static struct trace_event trace_bputs_event = {
	.type		= TRACE_BPUTS,
	.funcs		= &trace_bputs_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};


static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);
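
/*
 * Illustrative sketch only, kept out of the build with #if 0: a minimal
 * example of how a tracer might register its own output callback with
 * register_ftrace_event() and emit a line through trace_seq_printf().
 * The names below (struct sample_entry, sample_event, sample_event_trace)
 * are hypothetical and exist purely to show the shape of the API defined
 * in this file; they are not part of the kernel.
 */
#if 0
struct sample_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static enum print_line_t sample_event_trace(struct trace_iterator *iter,
					    int flags,
					    struct trace_event *event)
{
	struct sample_entry *field = (struct sample_entry *)iter->ent;

	/* trace_seq_printf() returns 0 when the seq buffer is full */
	if (!trace_seq_printf(&iter->seq, "sample: value=%lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions sample_event_funcs = {
	.trace		= sample_event_trace,
	/* unset callbacks (raw, hex, binary) fall back to trace_nop_print */
};

static struct trace_event sample_event = {
	/* leave .type 0 so register_ftrace_event() assigns a dynamic type */
	.funcs		= &sample_event_funcs,
};

static int __init sample_event_init(void)
{
	/* register_ftrace_event() returns the event type, or 0 on failure */
	if (!register_ftrace_event(&sample_event))
		pr_warn("sample_event failed to register\n");

	return 0;
}
early_initcall(sample_event_init);
#endif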