/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the trace would overflow the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy-to-user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);

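/*
 * Illustrative sketch (not part of this file): a typical event output
 * handler builds its line with trace_seq_printf() and treats a zero
 * return as a truncated (partial) line.  The event name and values
 * here are hypothetical.
 *
 *	static enum print_line_t my_event_print(struct trace_iterator *iter)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		if (!trace_seq_printf(s, "my_event: count=%d\n", 42))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */
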
/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy-to-user routines. To simplify formatting of a trace,
 * trace_seq_vprintf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy-to-user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len - 1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, const struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

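/*
 * Illustrative note (an inference from the loops above, not part of
 * this file): trace_seq_putmem_hex() emits bytes most-significant
 * first on either endianness, so
 *
 *	u32 v = 0x12345678;
 *	trace_seq_putmem_hex(s, &v, sizeof(v));
 *
 * appends "12345678 " to @s on a little-endian machine as well.
 */
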
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i, first = 1;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (ret == (const char *)(p->buffer + p->len))
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif

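/*
 * Illustrative sketch (hypothetical table, not part of this file):
 * given a name-terminated trace_print_flags table,
 *
 *	static const struct trace_print_flags rw_flags[] = {
 *		{ 0x1, "READ" },
 *		{ 0x2, "WRITE" },
 *		{ -1UL, NULL }
 *	};
 *
 * ftrace_print_flags_seq(p, "|", 0x7, rw_flags) appends "READ|WRITE|0x4"
 * (the unmatched 0x4 printed as raw hex) plus a terminating NUL to @p,
 * and returns a pointer to that string inside the seq buffer.
 */
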
const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

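/*
 * Illustrative note (path and address hypothetical): with a file-backed
 * VMA, seq_print_user_ip() emits the mapping path plus the offset into
 * it, e.g.
 *
 *	/lib/libc-2.15.so[+0x3f4a0]
 *
 * and appends a raw " <00007f1a2b3c4d5e>"-style address (IP_FMT width)
 * when TRACE_ITER_SYM_ADDR is set or the VMA has no file.
 */
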
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

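/*
 * Illustrative note (symbol names hypothetical): for a kernel address,
 * seq_print_ip_sym() prints just the symbol name by default, a
 * "schedule+0x3a/0x2c0"-style name via sprint_symbol() when
 * TRACE_ITER_SYM_OFFSET is set, and appends " <ffffffff81678c2e>"
 * when TRACE_ITER_SYM_ADDR is set.
 */
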
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';
	need_resched =
		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}

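/*
 * Illustrative note: the four-character latency field reads, in order,
 * irqs-off ('d', or 'X' if unsupported), need-resched ('N'), hard/soft
 * irq context ('H', 'h' or 's'), and the preempt count in hex.  For
 * example "dNh1" means interrupts disabled, reschedule pending, in
 * hardirq context, with a preempt count of 1; "...." means none of
 * the above.
 */
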
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh_us = 100;

static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->tr->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		return trace_seq_printf(
				s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
				ns2usecs(iter->ts),
				abs_msec, abs_usec,
				rel_msec, rel_usec);
	} else if (verbose && !in_ns) {
		return trace_seq_printf(
				s, "[%016llx] %lld (+%lld): ",
				iter->ts, abs_ts, rel_ts);
	} else if (!verbose && in_ns) {
		return trace_seq_printf(
				s, " %4lldus%c: ",
				abs_ts,
				rel_ts > preempt_mark_thresh_us ? '!' :
				rel_ts > 1 ? '+' : ' ');
	} else { /* !verbose && !in_ns */
		return trace_seq_printf(s, " %4lld: ", abs_ts);
	}
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		return trace_seq_printf(s, " %12llu: ", iter->ts);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(
				s, "%16s %5d %3d %d %08x %08lx ",
				comm, entry->pid, iter->cpu, entry->flags,
				entry->preempt_count, iter->idx);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
	}

	if (ret)
		ret = lat_print_timestamp(iter, next_ts);

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

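/*
 * Illustrative note (values hypothetical): with IRQ_INFO enabled,
 * trace_print_context() produces the familiar header of a trace line,
 * e.g.
 *
 *	            bash-1234  [001] d.s2  5092.543210: ...
 *
 * i.e. comm-pid, the CPU, the latency field from trace_print_lat_fmt(),
 * and a seconds.microseconds timestamp.
 */
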
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);

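/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a user registers a printer for a dynamically assigned type by
 * leaving .type zero; any callback left NULL falls back to
 * trace_nop_print() as seen above.
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,	(hypothetical handler)
 *	};
 *	static struct trace_event my_event = {
 *		.funcs	= &my_funcs,
 *	};
 *
 *	int type = register_ftrace_event(&my_event);
 *	if (!type)
 *		return -EBUSY;	(zero means registration failed)
 */
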
/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s, field->parent_ip, flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace = trace_fn_trace,
	.raw = trace_fn_raw,
	.hex = trace_fn_hex,
	.binary = trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type = TRACE_FN,
	.funcs = &trace_fn_funcs,
};

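/*
 * Illustrative note (symbol names and addresses hypothetical): the four
 * TRACE_FN callbacks above render the same function entry as,
 * respectively,
 *
 *	trace:	"do_sys_open <-sys_open\n"   (with PRINT_PARENT set)
 *	raw:	"ffffffff8110c8a0 ffffffff8110c950\n"
 *	hex:	both addresses as hex field dumps
 *	binary:	both addresses copied verbatim into the buffer
 */
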
/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}


static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace = trace_ctx_print,
	.raw = trace_ctx_raw,
	.hex = trace_ctx_hex,
	.binary = trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type = TRACE_CTX,
	.funcs = &trace_ctx_funcs,
};

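/*
 * Illustrative note (values hypothetical): trace_ctxwake_print()
 * renders a context switch as
 *
 *	 1234:120:R ==> [001]  5678:120:S bash
 *
 * i.e. prev pid:prio:state, the delimiter ("==>" for a switch,
 * "  +" for a wakeup), then the next CPU, pid:prio:state and comm.
 */
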
static struct trace_event_functions trace_wake_funcs = {
	.trace = trace_wake_print,
	.raw = trace_wake_raw,
	.hex = trace_wake_hex,
	.binary = trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type = TRACE_WAKE,
	.funcs = &trace_wake_funcs,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace = trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type = TRACE_STACK,
	.funcs = &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace = trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type = TRACE_USER_STACK,
	.funcs = &trace_user_stack_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}


static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace = trace_bprint_print,
	.raw = trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type = TRACE_BPRINT,
	.funcs = &trace_bprint_funcs,
};

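/*
 * Illustrative note: a TRACE_BPRINT record stores only a format pointer
 * and the binary arguments; trace_bprint_print() defers the actual
 * formatting to read time via trace_seq_bprintf()/bstr_printf().  A
 * TRACE_PRINT record (below) instead carries the already-formatted
 * string in field->buf.
 */
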
/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace = trace_print_print,
	.raw = trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type = TRACE_PRINT,
	.funcs = &trace_print_funcs,
};


static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
early_initcall(init_events);
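
/*
 * Illustrative sketch (hypothetical module, not part of this file):
 * external users pair the exported register/unregister calls across
 * init and exit, mirroring what init_events() does above for the
 * built-in event types.  my_event is the hypothetical trace_event
 * from the registration sketch earlier.
 *
 *	static int __init my_mod_init(void)
 *	{
 *		if (!register_ftrace_event(&my_event))
 *			return -ENODEV;	(zero means registration failed)
 *		return 0;
 *	}
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		unregister_ftrace_event(&my_event);
 *	}
 */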