/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *   Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *   Copyright (C) 2004-2006 Ingo Molnar
 *   Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
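
/*
 * Putting the boot handlers above together, an illustrative kernel
 * command line (the values are examples, not recommendations) is:
 *
 *	ftrace=function_graph trace_options=stacktrace alloc_snapshot
 *	ftrace_dump_on_oops=orig_cpu traceoff_on_warning=1
 *
 * trace_buf_size= and tracing_thresh= are handled by set_buf_size()
 * and set_tracing_thresh() further down in this file.
 */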

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of the pages in memory are used to hold the
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
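
/*
 * Illustrative pairing (a sketch, not a real caller): code that looks
 * up a trace_array must pin it with a reference for as long as it is
 * used.
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	...use tr...
 *	trace_array_put(tr);
 */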

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * per-cpu ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
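
/*
 * Typical reader-side usage of the primitives above (sketch): a
 * consumer of one CPU's buffer takes the per-cpu lock; a consumer of
 * the whole ring buffer passes RING_BUFFER_ALL_CPUS instead.
 *
 *	trace_access_lock(cpu);
 *	...read or consume events of @cpu's ring buffer...
 *	trace_access_unlock(cpu);
 */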

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
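
/*
 * Callers normally do not invoke these directly: the trace_puts()
 * macro (see linux/kernel.h) selects __trace_bputs() for strings the
 * compiler can prove constant and __trace_puts() otherwise. A direct
 * call looks like (sketch):
 *
 *	__trace_puts(_THIS_IP_, "hello\n", 6);
 */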
***\n"); 525 tracing_off(); 526 return; 527 } 528 529 /* Note, snapshot can not be used when the tracer uses it */ 530 if (tracer->use_max_tr) { 531 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n"); 532 internal_trace_puts("*** Can not use snapshot (sorry) ***\n"); 533 return; 534 } 535 536 local_irq_save(flags); 537 update_max_tr(tr, current, smp_processor_id()); 538 local_irq_restore(flags); 539 } 540 EXPORT_SYMBOL_GPL(tracing_snapshot); 541 542 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 543 struct trace_buffer *size_buf, int cpu_id); 544 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); 545 546 static int alloc_snapshot(struct trace_array *tr) 547 { 548 int ret; 549 550 if (!tr->allocated_snapshot) { 551 552 /* allocate spare buffer */ 553 ret = resize_buffer_duplicate_size(&tr->max_buffer, 554 &tr->trace_buffer, RING_BUFFER_ALL_CPUS); 555 if (ret < 0) 556 return ret; 557 558 tr->allocated_snapshot = true; 559 } 560 561 return 0; 562 } 563 564 void free_snapshot(struct trace_array *tr) 565 { 566 /* 567 * We don't free the ring buffer. instead, resize it because 568 * The max_tr ring buffer has some state (e.g. ring->clock) and 569 * we want preserve it. 570 */ 571 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); 572 set_buffer_entries(&tr->max_buffer, 1); 573 tracing_reset_online_cpus(&tr->max_buffer); 574 tr->allocated_snapshot = false; 575 } 576 577 /** 578 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. 579 * 580 * This is similar to trace_snapshot(), but it will allocate the 581 * snapshot buffer if it isn't already allocated. Use this only 582 * where it is safe to sleep, as the allocation may sleep. 583 * 584 * This causes a swap between the snapshot buffer and the current live 585 * tracing buffer. You can use this to take snapshots of the live 586 * trace when some condition is triggered, but continue to trace. 587 */ 588 void tracing_snapshot_alloc(void) 589 { 590 struct trace_array *tr = &global_trace; 591 int ret; 592 593 ret = alloc_snapshot(tr); 594 if (WARN_ON(ret < 0)) 595 return; 596 597 tracing_snapshot(); 598 } 599 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 600 #else 601 void tracing_snapshot(void) 602 { 603 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); 604 } 605 EXPORT_SYMBOL_GPL(tracing_snapshot); 606 void tracing_snapshot_alloc(void) 607 { 608 /* Give warning */ 609 tracing_snapshot(); 610 } 611 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 612 #endif /* CONFIG_TRACER_SNAPSHOT */ 613 614 static void tracer_tracing_off(struct trace_array *tr) 615 { 616 if (tr->trace_buffer.buffer) 617 ring_buffer_record_off(tr->trace_buffer.buffer); 618 /* 619 * This flag is looked at when buffers haven't been allocated 620 * yet, or by some tracers (like irqsoff), that just want to 621 * know if the ring buffer has been disabled, but it can handle 622 * races of where it gets disabled but we still do a record. 623 * As the check is in the fast path of the tracers, it is more 624 * important to be fast than accurate. 625 */ 626 tr->buffer_disabled = 1; 627 /* Make the flag seen by readers */ 628 smp_wmb(); 629 } 630 631 /** 632 * tracing_off - turn off tracing buffers 633 * 634 * This function stops the tracing buffers from recording data. 635 * It does not disable any overhead the tracers themselves may 636 * be causing. This function simply causes all recording to 637 * the ring buffers to fail. 

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
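
/*
 * Typical use from a debugfs write handler (a sketch with error
 * handling trimmed; trace_parser_loaded() is a helper from trace.h):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read > 0 && trace_parser_loaded(&parser))
 *		...parser.buffer now holds one NUL-terminated token...
 *	trace_parser_put(&parser);
 */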

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr(),
 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
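
/*
 * This is how the latency tracers use the above (a sketch modelled on
 * the irqsoff tracer): when a new worst case is seen, update the
 * recorded maximum and swap the live buffer into max_tr.
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr(tr, current, smp_processor_id());
 *	}
 */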

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
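
/*
 * A minimal registration (a sketch; the names are hypothetical, and
 * callbacks not supplied are filled in with the defaults above):
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *	};
 *
 *	register_tracer(&my_tracer);	(called from an __init function)
 */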

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
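
/*
 * The stop/start pair is meant for short, lightweight exclusions
 * (sketch):
 *
 *	tracing_stop();
 *	...work whose events should not be recorded...
 *	tracing_start();
 */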

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}
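
/*
 * Output code resolves a recorded pid back to a comm through the
 * cache above (a sketch, as done by the trace output paths):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 */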

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
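
/*
 * All event writers in this file follow the same reserve/fill/commit
 * sequence that these helpers implement (a sketch, mirroring
 * __trace_puts() above):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 *					  irq_flags, preempt_count());
 *	if (!event)
 *		return 0;
 *	entry = ring_buffer_event_data(event);
 *	...fill *entry...
 *	__buffer_unlock_commit(buffer, event);
 */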

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, that seems to get us to the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
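
/*
 * Example (a sketch): any kernel code path can drop its current stack
 * trace into the ring buffer, e.g. while chasing how a function is
 * reached:
 *
 *	trace_dump_stack(0);
 */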

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
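
/*
 * trace_vbprintk() above is the backend that the trace_printk()
 * macro ultimately reaches for constant format strings; a typical
 * call site looks like (a sketch; the arguments are placeholders):
 *
 *	trace_printk("page=%p order=%d\n", page, order);
 */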
trace_array *tr, 2078 unsigned long ip, const char *fmt, va_list args) 2079 { 2080 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); 2081 } 2082 2083 int trace_array_printk(struct trace_array *tr, 2084 unsigned long ip, const char *fmt, ...) 2085 { 2086 int ret; 2087 va_list ap; 2088 2089 if (!(trace_flags & TRACE_ITER_PRINTK)) 2090 return 0; 2091 2092 va_start(ap, fmt); 2093 ret = trace_array_vprintk(tr, ip, fmt, ap); 2094 va_end(ap); 2095 return ret; 2096 } 2097 2098 int trace_array_printk_buf(struct ring_buffer *buffer, 2099 unsigned long ip, const char *fmt, ...) 2100 { 2101 int ret; 2102 va_list ap; 2103 2104 if (!(trace_flags & TRACE_ITER_PRINTK)) 2105 return 0; 2106 2107 va_start(ap, fmt); 2108 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 2109 va_end(ap); 2110 return ret; 2111 } 2112 2113 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 2114 { 2115 return trace_array_vprintk(&global_trace, ip, fmt, args); 2116 } 2117 EXPORT_SYMBOL_GPL(trace_vprintk); 2118 2119 static void trace_iterator_increment(struct trace_iterator *iter) 2120 { 2121 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 2122 2123 iter->idx++; 2124 if (buf_iter) 2125 ring_buffer_read(buf_iter, NULL); 2126 } 2127 2128 static struct trace_entry * 2129 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 2130 unsigned long *lost_events) 2131 { 2132 struct ring_buffer_event *event; 2133 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 2134 2135 if (buf_iter) 2136 event = ring_buffer_iter_peek(buf_iter, ts); 2137 else 2138 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, 2139 lost_events); 2140 2141 if (event) { 2142 iter->ent_size = ring_buffer_event_length(event); 2143 return ring_buffer_event_data(event); 2144 } 2145 iter->ent_size = 0; 2146 return NULL; 2147 } 2148 2149 static struct trace_entry * 2150 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 2151 unsigned long *missing_events, u64 *ent_ts) 2152 { 2153 struct ring_buffer *buffer = iter->trace_buffer->buffer; 2154 struct trace_entry *ent, *next = NULL; 2155 unsigned long lost_events = 0, next_lost = 0; 2156 int cpu_file = iter->cpu_file; 2157 u64 next_ts = 0, ts; 2158 int next_cpu = -1; 2159 int next_size = 0; 2160 int cpu; 2161 2162 /* 2163 * If we are in a per_cpu trace file, don't bother iterating over 2164 * all the CPUs; peek at that one directly.
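* Otherwise every CPU's buffer is peeked and merged by timestamp. A worked sketch with illustrative values: if peeking yields ts(cpu0) == 105 and ts(cpu1) == 98 while cpu2 is empty, the merge loop below picks the cpu1 entry and hands next_cpu == 1 and next_ts == 98 back to the caller.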
2165 */ 2166 if (cpu_file > RING_BUFFER_ALL_CPUS) { 2167 if (ring_buffer_empty_cpu(buffer, cpu_file)) 2168 return NULL; 2169 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 2170 if (ent_cpu) 2171 *ent_cpu = cpu_file; 2172 2173 return ent; 2174 } 2175 2176 for_each_tracing_cpu(cpu) { 2177 2178 if (ring_buffer_empty_cpu(buffer, cpu)) 2179 continue; 2180 2181 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 2182 2183 /* 2184 * Pick the entry with the smallest timestamp: 2185 */ 2186 if (ent && (!next || ts < next_ts)) { 2187 next = ent; 2188 next_cpu = cpu; 2189 next_ts = ts; 2190 next_lost = lost_events; 2191 next_size = iter->ent_size; 2192 } 2193 } 2194 2195 iter->ent_size = next_size; 2196 2197 if (ent_cpu) 2198 *ent_cpu = next_cpu; 2199 2200 if (ent_ts) 2201 *ent_ts = next_ts; 2202 2203 if (missing_events) 2204 *missing_events = next_lost; 2205 2206 return next; 2207 } 2208 2209 /* Find the next real entry, without updating the iterator itself */ 2210 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 2211 int *ent_cpu, u64 *ent_ts) 2212 { 2213 return __find_next_entry(iter, ent_cpu, NULL, ent_ts); 2214 } 2215 2216 /* Find the next real entry, and increment the iterator to the next entry */ 2217 void *trace_find_next_entry_inc(struct trace_iterator *iter) 2218 { 2219 iter->ent = __find_next_entry(iter, &iter->cpu, 2220 &iter->lost_events, &iter->ts); 2221 2222 if (iter->ent) 2223 trace_iterator_increment(iter); 2224 2225 return iter->ent ? iter : NULL; 2226 } 2227 2228 static void trace_consume(struct trace_iterator *iter) 2229 { 2230 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, 2231 &iter->lost_events); 2232 } 2233 2234 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 2235 { 2236 struct trace_iterator *iter = m->private; 2237 int i = (int)*pos; 2238 void *ent; 2239 2240 WARN_ON_ONCE(iter->leftover); 2241 2242 (*pos)++; 2243 2244 /* can't go backwards */ 2245 if (iter->idx > i) 2246 return NULL; 2247 2248 if (iter->idx < 0) 2249 ent = trace_find_next_entry_inc(iter); 2250 else 2251 ent = iter; 2252 2253 while (ent && iter->idx < i) 2254 ent = trace_find_next_entry_inc(iter); 2255 2256 iter->pos = *pos; 2257 2258 return ent; 2259 } 2260 2261 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 2262 { 2263 struct ring_buffer_event *event; 2264 struct ring_buffer_iter *buf_iter; 2265 unsigned long entries = 0; 2266 u64 ts; 2267 2268 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; 2269 2270 buf_iter = trace_buffer_iter(iter, cpu); 2271 if (!buf_iter) 2272 return; 2273 2274 ring_buffer_iter_reset(buf_iter); 2275 2276 /* 2277 * With the max latency tracers, a reset may never have taken 2278 * place on a cpu. This shows up as the timestamp being earlier 2279 * than the start of the buffer. 2280 */ 2281 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { 2282 if (ts >= iter->trace_buffer->time_start) 2283 break; 2284 entries++; 2285 ring_buffer_read(buf_iter, NULL); 2286 } 2287 2288 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; 2289 } 2290 2291 /* 2292 * The current tracer is copied to avoid taking a global lock 2293 * all around. 2294 */ 2295 static void *s_start(struct seq_file *m, loff_t *pos) 2296 { 2297 struct trace_iterator *iter = m->private; 2298 struct trace_array *tr = iter->tr; 2299 int cpu_file = iter->cpu_file; 2300 void *p = NULL; 2301 loff_t l = 0; 2302 int cpu; 2303 2304 /* 2305 * copy the tracer to avoid using a global lock all around.
2306 * iter->trace is a copy of current_trace, the pointer to the 2307 * name may be used instead of a strcmp(), as iter->trace->name 2308 * will point to the same string as current_trace->name. 2309 */ 2310 mutex_lock(&trace_types_lock); 2311 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) 2312 *iter->trace = *tr->current_trace; 2313 mutex_unlock(&trace_types_lock); 2314 2315 #ifdef CONFIG_TRACER_MAX_TRACE 2316 if (iter->snapshot && iter->trace->use_max_tr) 2317 return ERR_PTR(-EBUSY); 2318 #endif 2319 2320 if (!iter->snapshot) 2321 atomic_inc(&trace_record_cmdline_disabled); 2322 2323 if (*pos != iter->pos) { 2324 iter->ent = NULL; 2325 iter->cpu = 0; 2326 iter->idx = -1; 2327 2328 if (cpu_file == RING_BUFFER_ALL_CPUS) { 2329 for_each_tracing_cpu(cpu) 2330 tracing_iter_reset(iter, cpu); 2331 } else 2332 tracing_iter_reset(iter, cpu_file); 2333 2334 iter->leftover = 0; 2335 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 2336 ; 2337 2338 } else { 2339 /* 2340 * If we overflowed the seq_file before, then we want 2341 * to just reuse the trace_seq buffer again. 2342 */ 2343 if (iter->leftover) 2344 p = iter; 2345 else { 2346 l = *pos - 1; 2347 p = s_next(m, p, &l); 2348 } 2349 } 2350 2351 trace_event_read_lock(); 2352 trace_access_lock(cpu_file); 2353 return p; 2354 } 2355 2356 static void s_stop(struct seq_file *m, void *p) 2357 { 2358 struct trace_iterator *iter = m->private; 2359 2360 #ifdef CONFIG_TRACER_MAX_TRACE 2361 if (iter->snapshot && iter->trace->use_max_tr) 2362 return; 2363 #endif 2364 2365 if (!iter->snapshot) 2366 atomic_dec(&trace_record_cmdline_disabled); 2367 2368 trace_access_unlock(iter->cpu_file); 2369 trace_event_read_unlock(); 2370 } 2371 2372 static void 2373 get_total_entries(struct trace_buffer *buf, 2374 unsigned long *total, unsigned long *entries) 2375 { 2376 unsigned long count; 2377 int cpu; 2378 2379 *total = 0; 2380 *entries = 0; 2381 2382 for_each_tracing_cpu(cpu) { 2383 count = ring_buffer_entries_cpu(buf->buffer, cpu); 2384 /* 2385 * If this buffer has skipped entries, then we hold all 2386 * entries for the trace and we need to ignore the 2387 * ones before the time stamp. 
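* A worked sketch with illustrative numbers: if a CPU holds 1000 entries of which 200 predate time_start, count below becomes 800 and both *total and *entries grow by 800; no overrun is added in that case, since holding the skipped entries means nothing was dropped.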
2388 */ 2389 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 2390 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 2391 /* total is the same as the entries */ 2392 *total += count; 2393 } else 2394 *total += count + 2395 ring_buffer_overrun_cpu(buf->buffer, cpu); 2396 *entries += count; 2397 } 2398 } 2399 2400 static void print_lat_help_header(struct seq_file *m) 2401 { 2402 seq_puts(m, "# _------=> CPU# \n"); 2403 seq_puts(m, "# / _-----=> irqs-off \n"); 2404 seq_puts(m, "# | / _----=> need-resched \n"); 2405 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 2406 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 2407 seq_puts(m, "# |||| / delay \n"); 2408 seq_puts(m, "# cmd pid ||||| time | caller \n"); 2409 seq_puts(m, "# \\ / ||||| \\ | / \n"); 2410 } 2411 2412 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) 2413 { 2414 unsigned long total; 2415 unsigned long entries; 2416 2417 get_total_entries(buf, &total, &entries); 2418 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 2419 entries, total, num_online_cpus()); 2420 seq_puts(m, "#\n"); 2421 } 2422 2423 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) 2424 { 2425 print_event_info(buf, m); 2426 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 2427 seq_puts(m, "# | | | | |\n"); 2428 } 2429 2430 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) 2431 { 2432 print_event_info(buf, m); 2433 seq_puts(m, "# _-----=> irqs-off\n"); 2434 seq_puts(m, "# / _----=> need-resched\n"); 2435 seq_puts(m, "# | / _---=> hardirq/softirq\n"); 2436 seq_puts(m, "# || / _--=> preempt-depth\n"); 2437 seq_puts(m, "# ||| / delay\n"); 2438 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); 2439 seq_puts(m, "# | | | |||| | |\n"); 2440 } 2441 2442 void 2443 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 2444 { 2445 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2446 struct trace_buffer *buf = iter->trace_buffer; 2447 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 2448 struct tracer *type = iter->trace; 2449 unsigned long entries; 2450 unsigned long total; 2451 const char *name = "preemption"; 2452 2453 name = type->name; 2454 2455 get_total_entries(buf, &total, &entries); 2456 2457 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 2458 name, UTS_RELEASE); 2459 seq_puts(m, "# -----------------------------------" 2460 "---------------------------------\n"); 2461 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 2462 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 2463 nsecs_to_usecs(data->saved_latency), 2464 entries, 2465 total, 2466 buf->cpu, 2467 #if defined(CONFIG_PREEMPT_NONE) 2468 "server", 2469 #elif defined(CONFIG_PREEMPT_VOLUNTARY) 2470 "desktop", 2471 #elif defined(CONFIG_PREEMPT) 2472 "preempt", 2473 #else 2474 "unknown", 2475 #endif 2476 /* These are reserved for later use */ 2477 0, 0, 0, 0); 2478 #ifdef CONFIG_SMP 2479 seq_printf(m, " #P:%d)\n", num_online_cpus()); 2480 #else 2481 seq_puts(m, ")\n"); 2482 #endif 2483 seq_puts(m, "# -----------------\n"); 2484 seq_printf(m, "# | task: %.16s-%d " 2485 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 2486 data->comm, data->pid, 2487 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 2488 data->policy, data->rt_priority); 2489 seq_puts(m, "# -----------------\n"); 2490 2491 if (data->critical_start) { 2492 seq_puts(m, "# => started at: "); 2493 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 2494 trace_print_seq(m, 
&iter->seq); 2495 seq_puts(m, "\n# => ended at: "); 2496 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 2497 trace_print_seq(m, &iter->seq); 2498 seq_puts(m, "\n#\n"); 2499 } 2500 2501 seq_puts(m, "#\n"); 2502 } 2503 2504 static void test_cpu_buff_start(struct trace_iterator *iter) 2505 { 2506 struct trace_seq *s = &iter->seq; 2507 2508 if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2509 return; 2510 2511 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2512 return; 2513 2514 if (cpumask_test_cpu(iter->cpu, iter->started)) 2515 return; 2516 2517 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) 2518 return; 2519 2520 cpumask_set_cpu(iter->cpu, iter->started); 2521 2522 /* Don't print started cpu buffer for the first entry of the trace */ 2523 if (iter->idx > 1) 2524 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 2525 iter->cpu); 2526 } 2527 2528 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2529 { 2530 struct trace_seq *s = &iter->seq; 2531 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2532 struct trace_entry *entry; 2533 struct trace_event *event; 2534 2535 entry = iter->ent; 2536 2537 test_cpu_buff_start(iter); 2538 2539 event = ftrace_find_event(entry->type); 2540 2541 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2542 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2543 if (!trace_print_lat_context(iter)) 2544 goto partial; 2545 } else { 2546 if (!trace_print_context(iter)) 2547 goto partial; 2548 } 2549 } 2550 2551 if (event) 2552 return event->funcs->trace(iter, sym_flags, event); 2553 2554 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 2555 goto partial; 2556 2557 return TRACE_TYPE_HANDLED; 2558 partial: 2559 return TRACE_TYPE_PARTIAL_LINE; 2560 } 2561 2562 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2563 { 2564 struct trace_seq *s = &iter->seq; 2565 struct trace_entry *entry; 2566 struct trace_event *event; 2567 2568 entry = iter->ent; 2569 2570 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2571 if (!trace_seq_printf(s, "%d %d %llu ", 2572 entry->pid, iter->cpu, iter->ts)) 2573 goto partial; 2574 } 2575 2576 event = ftrace_find_event(entry->type); 2577 if (event) 2578 return event->funcs->raw(iter, 0, event); 2579 2580 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 2581 goto partial; 2582 2583 return TRACE_TYPE_HANDLED; 2584 partial: 2585 return TRACE_TYPE_PARTIAL_LINE; 2586 } 2587 2588 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2589 { 2590 struct trace_seq *s = &iter->seq; 2591 unsigned char newline = '\n'; 2592 struct trace_entry *entry; 2593 struct trace_event *event; 2594 2595 entry = iter->ent; 2596 2597 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2598 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 2599 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 2600 SEQ_PUT_HEX_FIELD_RET(s, iter->ts); 2601 } 2602 2603 event = ftrace_find_event(entry->type); 2604 if (event) { 2605 enum print_line_t ret = event->funcs->hex(iter, 0, event); 2606 if (ret != TRACE_TYPE_HANDLED) 2607 return ret; 2608 } 2609 2610 SEQ_PUT_FIELD_RET(s, newline); 2611 2612 return TRACE_TYPE_HANDLED; 2613 } 2614 2615 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2616 { 2617 struct trace_seq *s = &iter->seq; 2618 struct trace_entry *entry; 2619 struct trace_event *event; 2620 2621 entry = iter->ent; 2622 2623 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2624 SEQ_PUT_FIELD_RET(s, entry->pid); 2625 SEQ_PUT_FIELD_RET(s, iter->cpu); 2626 SEQ_PUT_FIELD_RET(s, iter->ts); 2627 } 2628 
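/* A hedged sketch of the resulting record when context info is on: the raw fields above are written back to back, followed by the event's own payload from its ->binary() callback below: | pid | cpu | ts (u64) | event binary data ... | */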
2629 event = ftrace_find_event(entry->type); 2630 return event ? event->funcs->binary(iter, 0, event) : 2631 TRACE_TYPE_HANDLED; 2632 } 2633 2634 int trace_empty(struct trace_iterator *iter) 2635 { 2636 struct ring_buffer_iter *buf_iter; 2637 int cpu; 2638 2639 /* If we are looking at one CPU buffer, only check that one */ 2640 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 2641 cpu = iter->cpu_file; 2642 buf_iter = trace_buffer_iter(iter, cpu); 2643 if (buf_iter) { 2644 if (!ring_buffer_iter_empty(buf_iter)) 2645 return 0; 2646 } else { 2647 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 2648 return 0; 2649 } 2650 return 1; 2651 } 2652 2653 for_each_tracing_cpu(cpu) { 2654 buf_iter = trace_buffer_iter(iter, cpu); 2655 if (buf_iter) { 2656 if (!ring_buffer_iter_empty(buf_iter)) 2657 return 0; 2658 } else { 2659 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 2660 return 0; 2661 } 2662 } 2663 2664 return 1; 2665 } 2666 2667 /* Called with trace_event_read_lock() held. */ 2668 enum print_line_t print_trace_line(struct trace_iterator *iter) 2669 { 2670 enum print_line_t ret; 2671 2672 if (iter->lost_events && 2673 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 2674 iter->cpu, iter->lost_events)) 2675 return TRACE_TYPE_PARTIAL_LINE; 2676 2677 if (iter->trace && iter->trace->print_line) { 2678 ret = iter->trace->print_line(iter); 2679 if (ret != TRACE_TYPE_UNHANDLED) 2680 return ret; 2681 } 2682 2683 if (iter->ent->type == TRACE_BPUTS && 2684 trace_flags & TRACE_ITER_PRINTK && 2685 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2686 return trace_print_bputs_msg_only(iter); 2687 2688 if (iter->ent->type == TRACE_BPRINT && 2689 trace_flags & TRACE_ITER_PRINTK && 2690 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2691 return trace_print_bprintk_msg_only(iter); 2692 2693 if (iter->ent->type == TRACE_PRINT && 2694 trace_flags & TRACE_ITER_PRINTK && 2695 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2696 return trace_print_printk_msg_only(iter); 2697 2698 if (trace_flags & TRACE_ITER_BIN) 2699 return print_bin_fmt(iter); 2700 2701 if (trace_flags & TRACE_ITER_HEX) 2702 return print_hex_fmt(iter); 2703 2704 if (trace_flags & TRACE_ITER_RAW) 2705 return print_raw_fmt(iter); 2706 2707 return print_trace_fmt(iter); 2708 } 2709 2710 void trace_latency_header(struct seq_file *m) 2711 { 2712 struct trace_iterator *iter = m->private; 2713 2714 /* print nothing if the buffers are empty */ 2715 if (trace_empty(iter)) 2716 return; 2717 2718 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 2719 print_trace_header(m, iter); 2720 2721 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2722 print_lat_help_header(m); 2723 } 2724 2725 void trace_default_header(struct seq_file *m) 2726 { 2727 struct trace_iterator *iter = m->private; 2728 2729 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 2730 return; 2731 2732 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2733 /* print nothing if the buffers are empty */ 2734 if (trace_empty(iter)) 2735 return; 2736 print_trace_header(m, iter); 2737 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2738 print_lat_help_header(m); 2739 } else { 2740 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 2741 if (trace_flags & TRACE_ITER_IRQ_INFO) 2742 print_func_help_header_irq(iter->trace_buffer, m); 2743 else 2744 print_func_help_header(iter->trace_buffer, m); 2745 } 2746 } 2747 } 2748 2749 static void test_ftrace_alive(struct seq_file *m) 2750 { 2751 if (!ftrace_is_dead()) 2752 return; 2753 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 2754 seq_printf(m, "# MAY BE MISSING FUNCTION 
EVENTS\n"); 2755 } 2756 2757 #ifdef CONFIG_TRACER_MAX_TRACE 2758 static void show_snapshot_main_help(struct seq_file *m) 2759 { 2760 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); 2761 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); 2762 seq_printf(m, "# Takes a snapshot of the main buffer.\n"); 2763 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n"); 2764 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); 2765 seq_printf(m, "# is not a '0' or '1')\n"); 2766 } 2767 2768 static void show_snapshot_percpu_help(struct seq_file *m) 2769 { 2770 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 2771 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 2772 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); 2773 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n"); 2774 #else 2775 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n"); 2776 seq_printf(m, "# Must use main snapshot file to allocate.\n"); 2777 #endif 2778 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"); 2779 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); 2780 seq_printf(m, "# is not a '0' or '1')\n"); 2781 } 2782 2783 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 2784 { 2785 if (iter->tr->allocated_snapshot) 2786 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); 2787 else 2788 seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); 2789 2790 seq_printf(m, "# Snapshot commands:\n"); 2791 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 2792 show_snapshot_main_help(m); 2793 else 2794 show_snapshot_percpu_help(m); 2795 } 2796 #else 2797 /* Should never be called */ 2798 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 2799 #endif 2800 2801 static int s_show(struct seq_file *m, void *v) 2802 { 2803 struct trace_iterator *iter = v; 2804 int ret; 2805 2806 if (iter->ent == NULL) { 2807 if (iter->tr) { 2808 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2809 seq_puts(m, "#\n"); 2810 test_ftrace_alive(m); 2811 } 2812 if (iter->snapshot && trace_empty(iter)) 2813 print_snapshot_help(m, iter); 2814 else if (iter->trace && iter->trace->print_header) 2815 iter->trace->print_header(m); 2816 else 2817 trace_default_header(m); 2818 2819 } else if (iter->leftover) { 2820 /* 2821 * If we filled the seq_file buffer earlier, we 2822 * want to just show it now. 2823 */ 2824 ret = trace_print_seq(m, &iter->seq); 2825 2826 /* ret should this time be zero, but you never know */ 2827 iter->leftover = ret; 2828 2829 } else { 2830 print_trace_line(iter); 2831 ret = trace_print_seq(m, &iter->seq); 2832 /* 2833 * If we overflow the seq_file buffer, then it will 2834 * ask us for this data again at start up. 2835 * Use that instead. 2836 * ret is 0 if seq_file write succeeded. 2837 * -1 otherwise. 2838 */ 2839 iter->leftover = ret; 2840 } 2841 2842 return 0; 2843 } 2844 2845 /* 2846 * Should be used after trace_array_get(), trace_types_lock 2847 * ensures that i_cdev was already initialized. 
2848 */ 2849 static inline int tracing_get_cpu(struct inode *inode) 2850 { 2851 if (inode->i_cdev) /* See trace_create_cpu_file() */ 2852 return (long)inode->i_cdev - 1; 2853 return RING_BUFFER_ALL_CPUS; 2854 } 2855 2856 static const struct seq_operations tracer_seq_ops = { 2857 .start = s_start, 2858 .next = s_next, 2859 .stop = s_stop, 2860 .show = s_show, 2861 }; 2862 2863 static struct trace_iterator * 2864 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2865 { 2866 struct trace_array *tr = inode->i_private; 2867 struct trace_iterator *iter; 2868 int cpu; 2869 2870 if (tracing_disabled) 2871 return ERR_PTR(-ENODEV); 2872 2873 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 2874 if (!iter) 2875 return ERR_PTR(-ENOMEM); 2876 2877 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), 2878 GFP_KERNEL); 2879 if (!iter->buffer_iter) 2880 goto release; 2881 2882 /* 2883 * We make a copy of the current tracer to avoid concurrent 2884 * changes on it while we are reading. 2885 */ 2886 mutex_lock(&trace_types_lock); 2887 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2888 if (!iter->trace) 2889 goto fail; 2890 2891 *iter->trace = *tr->current_trace; 2892 2893 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2894 goto fail; 2895 2896 iter->tr = tr; 2897 2898 #ifdef CONFIG_TRACER_MAX_TRACE 2899 /* Currently only the top directory has a snapshot */ 2900 if (tr->current_trace->print_max || snapshot) 2901 iter->trace_buffer = &tr->max_buffer; 2902 else 2903 #endif 2904 iter->trace_buffer = &tr->trace_buffer; 2905 iter->snapshot = snapshot; 2906 iter->pos = -1; 2907 iter->cpu_file = tracing_get_cpu(inode); 2908 mutex_init(&iter->mutex); 2909 2910 /* Notify the tracer early; before we stop tracing. */ 2911 if (iter->trace && iter->trace->open) 2912 iter->trace->open(iter); 2913 2914 /* Annotate start of buffers if we had overruns */ 2915 if (ring_buffer_overruns(iter->trace_buffer->buffer)) 2916 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2917 2918 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 2919 if (trace_clocks[tr->clock_id].in_ns) 2920 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2921 2922 /* stop the trace while dumping if we are not opening "snapshot" */ 2923 if (!iter->snapshot) 2924 tracing_stop_tr(tr); 2925 2926 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 2927 for_each_tracing_cpu(cpu) { 2928 iter->buffer_iter[cpu] = 2929 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2930 } 2931 ring_buffer_read_prepare_sync(); 2932 for_each_tracing_cpu(cpu) { 2933 ring_buffer_read_start(iter->buffer_iter[cpu]); 2934 tracing_iter_reset(iter, cpu); 2935 } 2936 } else { 2937 cpu = iter->cpu_file; 2938 iter->buffer_iter[cpu] = 2939 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2940 ring_buffer_read_prepare_sync(); 2941 ring_buffer_read_start(iter->buffer_iter[cpu]); 2942 tracing_iter_reset(iter, cpu); 2943 } 2944 2945 mutex_unlock(&trace_types_lock); 2946 2947 return iter; 2948 2949 fail: 2950 mutex_unlock(&trace_types_lock); 2951 kfree(iter->trace); 2952 kfree(iter->buffer_iter); 2953 release: 2954 seq_release_private(inode, file); 2955 return ERR_PTR(-ENOMEM); 2956 } 2957 2958 int tracing_open_generic(struct inode *inode, struct file *filp) 2959 { 2960 if (tracing_disabled) 2961 return -ENODEV; 2962 2963 filp->private_data = inode->i_private; 2964 return 0; 2965 } 2966 2967 /* 2968 * Open and update trace_array ref count. 2969 * Must have the current trace_array passed to it. 
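* An illustrative pairing (the release side must drop the reference taken here), e.g. in a file_operations: .open = tracing_open_generic_tr, .release = tracing_release_generic_tr,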
2970 */ 2971 static int tracing_open_generic_tr(struct inode *inode, struct file *filp) 2972 { 2973 struct trace_array *tr = inode->i_private; 2974 2975 if (tracing_disabled) 2976 return -ENODEV; 2977 2978 if (trace_array_get(tr) < 0) 2979 return -ENODEV; 2980 2981 filp->private_data = inode->i_private; 2982 2983 return 0; 2984 } 2985 2986 static int tracing_release(struct inode *inode, struct file *file) 2987 { 2988 struct trace_array *tr = inode->i_private; 2989 struct seq_file *m = file->private_data; 2990 struct trace_iterator *iter; 2991 int cpu; 2992 2993 if (!(file->f_mode & FMODE_READ)) { 2994 trace_array_put(tr); 2995 return 0; 2996 } 2997 2998 /* Writes do not use seq_file */ 2999 iter = m->private; 3000 mutex_lock(&trace_types_lock); 3001 3002 for_each_tracing_cpu(cpu) { 3003 if (iter->buffer_iter[cpu]) 3004 ring_buffer_read_finish(iter->buffer_iter[cpu]); 3005 } 3006 3007 if (iter->trace && iter->trace->close) 3008 iter->trace->close(iter); 3009 3010 if (!iter->snapshot) 3011 /* reenable tracing if it was previously enabled */ 3012 tracing_start_tr(tr); 3013 3014 __trace_array_put(tr); 3015 3016 mutex_unlock(&trace_types_lock); 3017 3018 mutex_destroy(&iter->mutex); 3019 free_cpumask_var(iter->started); 3020 kfree(iter->trace); 3021 kfree(iter->buffer_iter); 3022 seq_release_private(inode, file); 3023 3024 return 0; 3025 } 3026 3027 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 3028 { 3029 struct trace_array *tr = inode->i_private; 3030 3031 trace_array_put(tr); 3032 return 0; 3033 } 3034 3035 static int tracing_single_release_tr(struct inode *inode, struct file *file) 3036 { 3037 struct trace_array *tr = inode->i_private; 3038 3039 trace_array_put(tr); 3040 3041 return single_release(inode, file); 3042 } 3043 3044 static int tracing_open(struct inode *inode, struct file *file) 3045 { 3046 struct trace_array *tr = inode->i_private; 3047 struct trace_iterator *iter; 3048 int ret = 0; 3049 3050 if (trace_array_get(tr) < 0) 3051 return -ENODEV; 3052 3053 /* If this file was open for write, then erase contents */ 3054 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 3055 int cpu = tracing_get_cpu(inode); 3056 3057 if (cpu == RING_BUFFER_ALL_CPUS) 3058 tracing_reset_online_cpus(&tr->trace_buffer); 3059 else 3060 tracing_reset(&tr->trace_buffer, cpu); 3061 } 3062 3063 if (file->f_mode & FMODE_READ) { 3064 iter = __tracing_open(inode, file, false); 3065 if (IS_ERR(iter)) 3066 ret = PTR_ERR(iter); 3067 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 3068 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3069 } 3070 3071 if (ret < 0) 3072 trace_array_put(tr); 3073 3074 return ret; 3075 } 3076 3077 static void * 3078 t_next(struct seq_file *m, void *v, loff_t *pos) 3079 { 3080 struct tracer *t = v; 3081 3082 (*pos)++; 3083 3084 if (t) 3085 t = t->next; 3086 3087 return t; 3088 } 3089 3090 static void *t_start(struct seq_file *m, loff_t *pos) 3091 { 3092 struct tracer *t; 3093 loff_t l = 0; 3094 3095 mutex_lock(&trace_types_lock); 3096 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 3097 ; 3098 3099 return t; 3100 } 3101 3102 static void t_stop(struct seq_file *m, void *p) 3103 { 3104 mutex_unlock(&trace_types_lock); 3105 } 3106 3107 static int t_show(struct seq_file *m, void *v) 3108 { 3109 struct tracer *t = v; 3110 3111 if (!t) 3112 return 0; 3113 3114 seq_printf(m, "%s", t->name); 3115 if (t->next) 3116 seq_putc(m, ' '); 3117 else 3118 seq_putc(m, '\n'); 3119 3120 return 0; 3121 } 3122 3123 static const struct seq_operations 
show_traces_seq_ops = { 3124 .start = t_start, 3125 .next = t_next, 3126 .stop = t_stop, 3127 .show = t_show, 3128 }; 3129 3130 static int show_traces_open(struct inode *inode, struct file *file) 3131 { 3132 if (tracing_disabled) 3133 return -ENODEV; 3134 3135 return seq_open(file, &show_traces_seq_ops); 3136 } 3137 3138 static ssize_t 3139 tracing_write_stub(struct file *filp, const char __user *ubuf, 3140 size_t count, loff_t *ppos) 3141 { 3142 return count; 3143 } 3144 3145 static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 3146 { 3147 if (file->f_mode & FMODE_READ) 3148 return seq_lseek(file, offset, origin); 3149 else 3150 return 0; 3151 } 3152 3153 static const struct file_operations tracing_fops = { 3154 .open = tracing_open, 3155 .read = seq_read, 3156 .write = tracing_write_stub, 3157 .llseek = tracing_seek, 3158 .release = tracing_release, 3159 }; 3160 3161 static const struct file_operations show_traces_fops = { 3162 .open = show_traces_open, 3163 .read = seq_read, 3164 .release = seq_release, 3165 .llseek = seq_lseek, 3166 }; 3167 3168 /* 3169 * Only trace on a CPU if the bitmask is set: 3170 */ 3171 static cpumask_var_t tracing_cpumask; 3172 3173 /* 3174 * The tracer itself will not take this lock, but still we want 3175 * to provide a consistent cpumask to user-space: 3176 */ 3177 static DEFINE_MUTEX(tracing_cpumask_update_lock); 3178 3179 /* 3180 * Temporary storage for the character representation of the 3181 * CPU bitmask (and one more byte for the newline): 3182 */ 3183 static char mask_str[NR_CPUS + 1]; 3184 3185 static ssize_t 3186 tracing_cpumask_read(struct file *filp, char __user *ubuf, 3187 size_t count, loff_t *ppos) 3188 { 3189 int len; 3190 3191 mutex_lock(&tracing_cpumask_update_lock); 3192 3193 len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 3194 if (count - len < 2) { 3195 count = -EINVAL; 3196 goto out_err; 3197 } 3198 len += sprintf(mask_str + len, "\n"); 3199 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 3200 3201 out_err: 3202 mutex_unlock(&tracing_cpumask_update_lock); 3203 3204 return count; 3205 } 3206 3207 static ssize_t 3208 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 3209 size_t count, loff_t *ppos) 3210 { 3211 struct trace_array *tr = filp->private_data; 3212 cpumask_var_t tracing_cpumask_new; 3213 int err, cpu; 3214 3215 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 3216 return -ENOMEM; 3217 3218 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 3219 if (err) 3220 goto err_unlock; 3221 3222 mutex_lock(&tracing_cpumask_update_lock); 3223 3224 local_irq_disable(); 3225 arch_spin_lock(&ftrace_max_lock); 3226 for_each_tracing_cpu(cpu) { 3227 /* 3228 * Increase/decrease the disabled counter if we are 3229 * about to flip a bit in the cpumask: 3230 */ 3231 if (cpumask_test_cpu(cpu, tracing_cpumask) && 3232 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3233 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3234 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); 3235 } 3236 if (!cpumask_test_cpu(cpu, tracing_cpumask) && 3237 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3238 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3239 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 3240 } 3241 } 3242 arch_spin_unlock(&ftrace_max_lock); 3243 local_irq_enable(); 3244 3245 cpumask_copy(tracing_cpumask, tracing_cpumask_new); 3246 3247 mutex_unlock(&tracing_cpumask_update_lock); 3248 
free_cpumask_var(tracing_cpumask_new); 3249 3250 return count; 3251 3252 err_unlock: 3253 free_cpumask_var(tracing_cpumask_new); 3254 3255 return err; 3256 } 3257 3258 static const struct file_operations tracing_cpumask_fops = { 3259 .open = tracing_open_generic, 3260 .read = tracing_cpumask_read, 3261 .write = tracing_cpumask_write, 3262 .llseek = generic_file_llseek, 3263 }; 3264 3265 static int tracing_trace_options_show(struct seq_file *m, void *v) 3266 { 3267 struct tracer_opt *trace_opts; 3268 struct trace_array *tr = m->private; 3269 u32 tracer_flags; 3270 int i; 3271 3272 mutex_lock(&trace_types_lock); 3273 tracer_flags = tr->current_trace->flags->val; 3274 trace_opts = tr->current_trace->flags->opts; 3275 3276 for (i = 0; trace_options[i]; i++) { 3277 if (trace_flags & (1 << i)) 3278 seq_printf(m, "%s\n", trace_options[i]); 3279 else 3280 seq_printf(m, "no%s\n", trace_options[i]); 3281 } 3282 3283 for (i = 0; trace_opts[i].name; i++) { 3284 if (tracer_flags & trace_opts[i].bit) 3285 seq_printf(m, "%s\n", trace_opts[i].name); 3286 else 3287 seq_printf(m, "no%s\n", trace_opts[i].name); 3288 } 3289 mutex_unlock(&trace_types_lock); 3290 3291 return 0; 3292 } 3293 3294 static int __set_tracer_option(struct tracer *trace, 3295 struct tracer_flags *tracer_flags, 3296 struct tracer_opt *opts, int neg) 3297 { 3298 int ret; 3299 3300 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 3301 if (ret) 3302 return ret; 3303 3304 if (neg) 3305 tracer_flags->val &= ~opts->bit; 3306 else 3307 tracer_flags->val |= opts->bit; 3308 return 0; 3309 } 3310 3311 /* Try to assign a tracer specific option */ 3312 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 3313 { 3314 struct tracer_flags *tracer_flags = trace->flags; 3315 struct tracer_opt *opts = NULL; 3316 int i; 3317 3318 for (i = 0; tracer_flags->opts[i].name; i++) { 3319 opts = &tracer_flags->opts[i]; 3320 3321 if (strcmp(cmp, opts->name) == 0) 3322 return __set_tracer_option(trace, trace->flags, 3323 opts, neg); 3324 } 3325 3326 return -EINVAL; 3327 } 3328 3329 /* Some tracers require overwrite to stay enabled */ 3330 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 3331 { 3332 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 3333 return -1; 3334 3335 return 0; 3336 } 3337 3338 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 3339 { 3340 /* do nothing if flag is already set */ 3341 if (!!(trace_flags & mask) == !!enabled) 3342 return 0; 3343 3344 /* Give the tracer a chance to approve the change */ 3345 if (tr->current_trace->flag_changed) 3346 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) 3347 return -EINVAL; 3348 3349 if (enabled) 3350 trace_flags |= mask; 3351 else 3352 trace_flags &= ~mask; 3353 3354 if (mask == TRACE_ITER_RECORD_CMD) 3355 trace_event_enable_cmd_record(enabled); 3356 3357 if (mask == TRACE_ITER_OVERWRITE) { 3358 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); 3359 #ifdef CONFIG_TRACER_MAX_TRACE 3360 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 3361 #endif 3362 } 3363 3364 if (mask == TRACE_ITER_PRINTK) 3365 trace_printk_start_stop_comm(enabled); 3366 3367 return 0; 3368 } 3369 3370 static int trace_set_options(struct trace_array *tr, char *option) 3371 { 3372 char *cmp; 3373 int neg = 0; 3374 int ret = -ENODEV; 3375 int i; 3376 3377 cmp = strstrip(option); 3378 3379 if (strncmp(cmp, "no", 2) == 0) { 3380 neg = 1; 3381 cmp += 2; 3382 } 3383 3384 mutex_lock(&trace_types_lock); 3385 
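/* Illustrative example, assuming the standard "print-parent" entry in trace_options[]: writing "noprint-parent" arrives here with neg == 1 and cmp == "print-parent", so the loop below clears that bit through set_tracer_flag(). */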
3386 for (i = 0; trace_options[i]; i++) { 3387 if (strcmp(cmp, trace_options[i]) == 0) { 3388 ret = set_tracer_flag(tr, 1 << i, !neg); 3389 break; 3390 } 3391 } 3392 3393 /* If no option could be set, test the specific tracer options */ 3394 if (!trace_options[i]) 3395 ret = set_tracer_option(tr->current_trace, cmp, neg); 3396 3397 mutex_unlock(&trace_types_lock); 3398 3399 return ret; 3400 } 3401 3402 static ssize_t 3403 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 3404 size_t cnt, loff_t *ppos) 3405 { 3406 struct seq_file *m = filp->private_data; 3407 struct trace_array *tr = m->private; 3408 char buf[64]; 3409 int ret; 3410 3411 if (cnt >= sizeof(buf)) 3412 return -EINVAL; 3413 3414 if (copy_from_user(&buf, ubuf, cnt)) 3415 return -EFAULT; 3416 3417 buf[cnt] = 0; 3418 3419 ret = trace_set_options(tr, buf); 3420 if (ret < 0) 3421 return ret; 3422 3423 *ppos += cnt; 3424 3425 return cnt; 3426 } 3427 3428 static int tracing_trace_options_open(struct inode *inode, struct file *file) 3429 { 3430 struct trace_array *tr = inode->i_private; 3431 int ret; 3432 3433 if (tracing_disabled) 3434 return -ENODEV; 3435 3436 if (trace_array_get(tr) < 0) 3437 return -ENODEV; 3438 3439 ret = single_open(file, tracing_trace_options_show, inode->i_private); 3440 if (ret < 0) 3441 trace_array_put(tr); 3442 3443 return ret; 3444 } 3445 3446 static const struct file_operations tracing_iter_fops = { 3447 .open = tracing_trace_options_open, 3448 .read = seq_read, 3449 .llseek = seq_lseek, 3450 .release = tracing_single_release_tr, 3451 .write = tracing_trace_options_write, 3452 }; 3453 3454 static const char readme_msg[] = 3455 "tracing mini-HOWTO:\n\n" 3456 "# echo 0 > tracing_on : quick way to disable tracing\n" 3457 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 3458 " Important files:\n" 3459 " trace\t\t\t- The static contents of the buffer\n" 3460 "\t\t\t To clear the buffer write into this file: echo > trace\n" 3461 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 3462 " current_tracer\t- function and latency tracers\n" 3463 " available_tracers\t- list of configured tracers for current_tracer\n" 3464 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 3465 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 3466 " trace_clock\t\t- change the clock used to order events\n" 3467 " local: Per cpu clock but may not be synced across CPUs\n" 3468 " global: Synced across CPUs but slows tracing down.\n" 3469 " counter: Not a clock, but just an increment\n" 3470 " uptime: Jiffy counter from time of boot\n" 3471 " perf: Same clock that perf events use\n" 3472 #ifdef CONFIG_X86_64 3473 " x86-tsc: TSC cycle counter\n" 3474 #endif 3475 "\n trace_marker\t\t- Writing into this file writes into the kernel buffer\n" 3476 " tracing_cpumask\t- Limit which CPUs to trace\n" 3477 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 3478 "\t\t\t Remove sub-buffer with rmdir\n" 3479 " trace_options\t\t- Set format or modify how tracing happens\n" 3480 "\t\t\t Disable an option by prefixing the option name with 'no'\n" 3481 #ifdef CONFIG_DYNAMIC_FTRACE 3482 "\n available_filter_functions - list of functions that can be filtered on\n" 3483 " set_ftrace_filter\t- echo function name in here to only trace these functions\n" 3484 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 3485 " modules: Can select a group via module\n" 3486 " Format: :mod:<module-name>\n" 3487 " example: echo :mod:ext3 > set_ftrace_filter\n" 3488
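/* (Illustrative filter usage combining the glob forms listed above: echo 'vmalloc_*' > set_ftrace_filter traces only functions starting with "vmalloc_", and appending with '>>' adds patterns to the existing filter.) */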
" triggers: a command to perform when function is hit\n" 3489 " Format: <function>:<trigger>[:count]\n" 3490 " trigger: traceon, traceoff\n" 3491 " enable_event:<system>:<event>\n" 3492 " disable_event:<system>:<event>\n" 3493 #ifdef CONFIG_STACKTRACE 3494 " stacktrace\n" 3495 #endif 3496 #ifdef CONFIG_TRACER_SNAPSHOT 3497 " snapshot\n" 3498 #endif 3499 " example: echo do_fault:traceoff > set_ftrace_filter\n" 3500 " echo do_trap:traceoff:3 > set_ftrace_filter\n" 3501 " The first one will disable tracing every time do_fault is hit\n" 3502 " The second will disable tracing at most 3 times when do_trap is hit\n" 3503 " The first time do trap is hit and it disables tracing, the counter\n" 3504 " will decrement to 2. If tracing is already disabled, the counter\n" 3505 " will not decrement. It only decrements when the trigger did work\n" 3506 " To remove trigger without count:\n" 3507 " echo '!<function>:<trigger> > set_ftrace_filter\n" 3508 " To remove trigger with a count:\n" 3509 " echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 3510 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 3511 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 3512 " modules: Can select a group via module command :mod:\n" 3513 " Does not accept triggers\n" 3514 #endif /* CONFIG_DYNAMIC_FTRACE */ 3515 #ifdef CONFIG_FUNCTION_TRACER 3516 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n" 3517 #endif 3518 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3519 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 3520 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 3521 #endif 3522 #ifdef CONFIG_TRACER_SNAPSHOT 3523 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" 3524 "\t\t\t Read the contents for more information\n" 3525 #endif 3526 #ifdef CONFIG_STACK_TRACER 3527 " stack_trace\t\t- Shows the max stack trace when active\n" 3528 " stack_max_size\t- Shows current max stack size that was traced\n" 3529 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" 3530 #ifdef CONFIG_DYNAMIC_FTRACE 3531 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" 3532 #endif 3533 #endif /* CONFIG_STACK_TRACER */ 3534 ; 3535 3536 static ssize_t 3537 tracing_readme_read(struct file *filp, char __user *ubuf, 3538 size_t cnt, loff_t *ppos) 3539 { 3540 return simple_read_from_buffer(ubuf, cnt, ppos, 3541 readme_msg, strlen(readme_msg)); 3542 } 3543 3544 static const struct file_operations tracing_readme_fops = { 3545 .open = tracing_open_generic, 3546 .read = tracing_readme_read, 3547 .llseek = generic_file_llseek, 3548 }; 3549 3550 static ssize_t 3551 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 3552 size_t cnt, loff_t *ppos) 3553 { 3554 char *buf_comm; 3555 char *file_buf; 3556 char *buf; 3557 int len = 0; 3558 int pid; 3559 int i; 3560 3561 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 3562 if (!file_buf) 3563 return -ENOMEM; 3564 3565 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 3566 if (!buf_comm) { 3567 kfree(file_buf); 3568 return -ENOMEM; 3569 } 3570 3571 buf = file_buf; 3572 3573 for (i = 0; i < SAVED_CMDLINES; i++) { 3574 int r; 3575 3576 pid = map_cmdline_to_pid[i]; 3577 if (pid == -1 || pid == NO_CMDLINE_MAP) 3578 continue; 3579 3580 trace_find_cmdline(pid, buf_comm); 3581 r = sprintf(buf, "%d %s\n", pid, buf_comm); 3582 buf += r; 3583 len += r; 3584 } 3585 3586 len = 
simple_read_from_buffer(ubuf, cnt, ppos, 3587 file_buf, len); 3588 3589 kfree(file_buf); 3590 kfree(buf_comm); 3591 3592 return len; 3593 } 3594 3595 static const struct file_operations tracing_saved_cmdlines_fops = { 3596 .open = tracing_open_generic, 3597 .read = tracing_saved_cmdlines_read, 3598 .llseek = generic_file_llseek, 3599 }; 3600 3601 static ssize_t 3602 tracing_set_trace_read(struct file *filp, char __user *ubuf, 3603 size_t cnt, loff_t *ppos) 3604 { 3605 struct trace_array *tr = filp->private_data; 3606 char buf[MAX_TRACER_SIZE+2]; 3607 int r; 3608 3609 mutex_lock(&trace_types_lock); 3610 r = sprintf(buf, "%s\n", tr->current_trace->name); 3611 mutex_unlock(&trace_types_lock); 3612 3613 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3614 } 3615 3616 int tracer_init(struct tracer *t, struct trace_array *tr) 3617 { 3618 tracing_reset_online_cpus(&tr->trace_buffer); 3619 return t->init(tr); 3620 } 3621 3622 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) 3623 { 3624 int cpu; 3625 3626 for_each_tracing_cpu(cpu) 3627 per_cpu_ptr(buf->data, cpu)->entries = val; 3628 } 3629 3630 #ifdef CONFIG_TRACER_MAX_TRACE 3631 /* resize @trace_buf's entries to match @size_buf's entries */ 3632 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 3633 struct trace_buffer *size_buf, int cpu_id) 3634 { 3635 int cpu, ret = 0; 3636 3637 if (cpu_id == RING_BUFFER_ALL_CPUS) { 3638 for_each_tracing_cpu(cpu) { 3639 ret = ring_buffer_resize(trace_buf->buffer, 3640 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 3641 if (ret < 0) 3642 break; 3643 per_cpu_ptr(trace_buf->data, cpu)->entries = 3644 per_cpu_ptr(size_buf->data, cpu)->entries; 3645 } 3646 } else { 3647 ret = ring_buffer_resize(trace_buf->buffer, 3648 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 3649 if (ret == 0) 3650 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 3651 per_cpu_ptr(size_buf->data, cpu_id)->entries; 3652 } 3653 3654 return ret; 3655 } 3656 #endif /* CONFIG_TRACER_MAX_TRACE */ 3657 3658 static int __tracing_resize_ring_buffer(struct trace_array *tr, 3659 unsigned long size, int cpu) 3660 { 3661 int ret; 3662 3663 /* 3664 * If the kernel or the user changes the size of the ring buffer, 3665 * we use the size that was given, and we can forget about 3666 * expanding it later. 3667 */ 3668 ring_buffer_expanded = true; 3669 3670 /* May be called before buffers are initialized */ 3671 if (!tr->trace_buffer.buffer) 3672 return 0; 3673 3674 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); 3675 if (ret < 0) 3676 return ret; 3677 3678 #ifdef CONFIG_TRACER_MAX_TRACE 3679 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || 3680 !tr->current_trace->use_max_tr) 3681 goto out; 3682 3683 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 3684 if (ret < 0) { 3685 int r = resize_buffer_duplicate_size(&tr->trace_buffer, 3686 &tr->trace_buffer, cpu); 3687 if (r < 0) { 3688 /* 3689 * AARGH! We are left with a different 3690 * size max buffer!!!! 3691 * The max buffer is our "snapshot" buffer. 3692 * When a tracer needs a snapshot (one of the 3693 * latency tracers), it swaps the max buffer 3694 * with the saved snapshot. We succeeded in 3695 * updating the size of the main buffer, but failed to 3696 * update the size of the max buffer. But when we tried 3697 * to reset the main buffer to the original size, we 3698 * failed there too. This is very unlikely to 3699 * happen, but if it does, warn and kill all 3700 * tracing.
3701 */ 3702 WARN_ON(1); 3703 tracing_disabled = 1; 3704 } 3705 return ret; 3706 } 3707 3708 if (cpu == RING_BUFFER_ALL_CPUS) 3709 set_buffer_entries(&tr->max_buffer, size); 3710 else 3711 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; 3712 3713 out: 3714 #endif /* CONFIG_TRACER_MAX_TRACE */ 3715 3716 if (cpu == RING_BUFFER_ALL_CPUS) 3717 set_buffer_entries(&tr->trace_buffer, size); 3718 else 3719 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; 3720 3721 return ret; 3722 } 3723 3724 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 3725 unsigned long size, int cpu_id) 3726 { 3727 int ret = size; 3728 3729 mutex_lock(&trace_types_lock); 3730 3731 if (cpu_id != RING_BUFFER_ALL_CPUS) { 3732 /* make sure this cpu is enabled in the mask */ 3733 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 3734 ret = -EINVAL; 3735 goto out; 3736 } 3737 } 3738 3739 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); 3740 if (ret < 0) 3741 ret = -ENOMEM; 3742 3743 out: 3744 mutex_unlock(&trace_types_lock); 3745 3746 return ret; 3747 } 3748 3749 3750 /** 3751 * tracing_update_buffers - used by tracing facility to expand ring buffers 3752 * 3753 * To save memory when tracing is never used on a system that has it 3754 * configured in, the ring buffers are set to a minimum size. But once 3755 * a user starts to use the tracing facility, they need to grow 3756 * to their default size. 3757 * 3758 * This function is to be called when a tracer is about to be used. 3759 */ 3760 int tracing_update_buffers(void) 3761 { 3762 int ret = 0; 3763 3764 mutex_lock(&trace_types_lock); 3765 if (!ring_buffer_expanded) 3766 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, 3767 RING_BUFFER_ALL_CPUS); 3768 mutex_unlock(&trace_types_lock); 3769 3770 return ret; 3771 } 3772 3773 struct trace_option_dentry; 3774 3775 static struct trace_option_dentry * 3776 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 3777 3778 static void 3779 destroy_trace_option_files(struct trace_option_dentry *topts); 3780 3781 static int tracing_set_tracer(const char *buf) 3782 { 3783 static struct trace_option_dentry *topts; 3784 struct trace_array *tr = &global_trace; 3785 struct tracer *t; 3786 #ifdef CONFIG_TRACER_MAX_TRACE 3787 bool had_max_tr; 3788 #endif 3789 int ret = 0; 3790 3791 mutex_lock(&trace_types_lock); 3792 3793 if (!ring_buffer_expanded) { 3794 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 3795 RING_BUFFER_ALL_CPUS); 3796 if (ret < 0) 3797 goto out; 3798 ret = 0; 3799 } 3800 3801 for (t = trace_types; t; t = t->next) { 3802 if (strcmp(t->name, buf) == 0) 3803 break; 3804 } 3805 if (!t) { 3806 ret = -EINVAL; 3807 goto out; 3808 } 3809 if (t == tr->current_trace) 3810 goto out; 3811 3812 trace_branch_disable(); 3813 3814 tr->current_trace->enabled = false; 3815 3816 if (tr->current_trace->reset) 3817 tr->current_trace->reset(tr); 3818 3819 /* Current trace needs to be nop_trace before synchronize_sched */ 3820 tr->current_trace = &nop_trace; 3821 3822 #ifdef CONFIG_TRACER_MAX_TRACE 3823 had_max_tr = tr->allocated_snapshot; 3824 3825 if (had_max_tr && !t->use_max_tr) { 3826 /* 3827 * We need to make sure that update_max_tr() sees that 3828 * current_trace changed to nop_trace to keep it from 3829 * swapping the buffers after we resize it. 3830 * update_max_tr() is called with interrupts disabled, 3831 * so a synchronize_sched() is sufficient.
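* A hedged sketch of the ordering this relies on: this CPU has already set tr->current_trace = &nop_trace, and the synchronize_sched() below waits out every in-flight update_max_tr(), which runs with interrupts disabled, so the free_snapshot() that follows cannot race with a buffer swap.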
3832 */ 3833 synchronize_sched(); 3834 free_snapshot(tr); 3835 } 3836 #endif 3837 destroy_trace_option_files(topts); 3838 3839 topts = create_trace_option_files(tr, t); 3840 3841 #ifdef CONFIG_TRACER_MAX_TRACE 3842 if (t->use_max_tr && !had_max_tr) { 3843 ret = alloc_snapshot(tr); 3844 if (ret < 0) 3845 goto out; 3846 } 3847 #endif 3848 3849 if (t->init) { 3850 ret = tracer_init(t, tr); 3851 if (ret) 3852 goto out; 3853 } 3854 3855 tr->current_trace = t; 3856 tr->current_trace->enabled = true; 3857 trace_branch_enable(tr); 3858 out: 3859 mutex_unlock(&trace_types_lock); 3860 3861 return ret; 3862 } 3863 3864 static ssize_t 3865 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 3866 size_t cnt, loff_t *ppos) 3867 { 3868 char buf[MAX_TRACER_SIZE+1]; 3869 int i; 3870 size_t ret; 3871 int err; 3872 3873 ret = cnt; 3874 3875 if (cnt > MAX_TRACER_SIZE) 3876 cnt = MAX_TRACER_SIZE; 3877 3878 if (copy_from_user(&buf, ubuf, cnt)) 3879 return -EFAULT; 3880 3881 buf[cnt] = 0; 3882 3883 /* strip ending whitespace. */ 3884 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 3885 buf[i] = 0; 3886 3887 err = tracing_set_tracer(buf); 3888 if (err) 3889 return err; 3890 3891 *ppos += ret; 3892 3893 return ret; 3894 } 3895 3896 static ssize_t 3897 tracing_max_lat_read(struct file *filp, char __user *ubuf, 3898 size_t cnt, loff_t *ppos) 3899 { 3900 unsigned long *ptr = filp->private_data; 3901 char buf[64]; 3902 int r; 3903 3904 r = snprintf(buf, sizeof(buf), "%ld\n", 3905 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 3906 if (r > sizeof(buf)) 3907 r = sizeof(buf); 3908 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3909 } 3910 3911 static ssize_t 3912 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 3913 size_t cnt, loff_t *ppos) 3914 { 3915 unsigned long *ptr = filp->private_data; 3916 unsigned long val; 3917 int ret; 3918 3919 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3920 if (ret) 3921 return ret; 3922 3923 *ptr = val * 1000; 3924 3925 return cnt; 3926 } 3927 3928 static int tracing_open_pipe(struct inode *inode, struct file *filp) 3929 { 3930 struct trace_array *tr = inode->i_private; 3931 struct trace_iterator *iter; 3932 int ret = 0; 3933 3934 if (tracing_disabled) 3935 return -ENODEV; 3936 3937 if (trace_array_get(tr) < 0) 3938 return -ENODEV; 3939 3940 mutex_lock(&trace_types_lock); 3941 3942 /* create a buffer to store the information to pass to userspace */ 3943 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3944 if (!iter) { 3945 ret = -ENOMEM; 3946 __trace_array_put(tr); 3947 goto out; 3948 } 3949 3950 /* 3951 * We make a copy of the current tracer to avoid concurrent 3952 * changes on it while we are reading. 3953 */ 3954 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 3955 if (!iter->trace) { 3956 ret = -ENOMEM; 3957 goto fail; 3958 } 3959 *iter->trace = *tr->current_trace; 3960 3961 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3962 ret = -ENOMEM; 3963 goto fail; 3964 } 3965 3966 /* trace pipe does not show start of buffer */ 3967 cpumask_setall(iter->started); 3968 3969 if (trace_flags & TRACE_ITER_LATENCY_FMT) 3970 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3971 3972 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
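* (Illustrative: of the clocks listed in the tracing mini-HOWTO, "counter" is a bare increment and "uptime" counts jiffies; neither counts in nanoseconds, so this flag stays clear for them.)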
*/ 3973 if (trace_clocks[tr->clock_id].in_ns) 3974 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3975 3976 iter->tr = tr; 3977 iter->trace_buffer = &tr->trace_buffer; 3978 iter->cpu_file = tracing_get_cpu(inode); 3979 mutex_init(&iter->mutex); 3980 filp->private_data = iter; 3981 3982 if (iter->trace->pipe_open) 3983 iter->trace->pipe_open(iter); 3984 3985 nonseekable_open(inode, filp); 3986 out: 3987 mutex_unlock(&trace_types_lock); 3988 return ret; 3989 3990 fail: 3991 kfree(iter->trace); 3992 kfree(iter); 3993 __trace_array_put(tr); 3994 mutex_unlock(&trace_types_lock); 3995 return ret; 3996 } 3997 3998 static int tracing_release_pipe(struct inode *inode, struct file *file) 3999 { 4000 struct trace_iterator *iter = file->private_data; 4001 struct trace_array *tr = inode->i_private; 4002 4003 mutex_lock(&trace_types_lock); 4004 4005 if (iter->trace->pipe_close) 4006 iter->trace->pipe_close(iter); 4007 4008 mutex_unlock(&trace_types_lock); 4009 4010 free_cpumask_var(iter->started); 4011 mutex_destroy(&iter->mutex); 4012 kfree(iter->trace); 4013 kfree(iter); 4014 4015 trace_array_put(tr); 4016 4017 return 0; 4018 } 4019 4020 static unsigned int 4021 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 4022 { 4023 /* Iterators are static; they are either filled or empty */ 4024 if (trace_buffer_iter(iter, iter->cpu_file)) 4025 return POLLIN | POLLRDNORM; 4026 4027 if (trace_flags & TRACE_ITER_BLOCK) 4028 /* 4029 * Always select as readable when in blocking mode 4030 */ 4031 return POLLIN | POLLRDNORM; 4032 else 4033 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, 4034 filp, poll_table); 4035 } 4036 4037 static unsigned int 4038 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 4039 { 4040 struct trace_iterator *iter = filp->private_data; 4041 4042 return trace_poll(iter, filp, poll_table); 4043 } 4044 4045 /* 4046 * This is a makeshift waitqueue. 4047 * A tracer might use this callback in some rare cases: 4048 * 4049 * 1) the current tracer might hold the runqueue lock when it wakes up 4050 * a reader, hence a deadlock (sched, function, and function graph tracers) 4051 * 2) the function tracers trace all functions; we don't want 4052 * the overhead of calling wake_up and friends 4053 * (and tracing them too) 4054 * 4055 * Anyway, this really is a very primitive wakeup. 4056 */ 4057 void poll_wait_pipe(struct trace_iterator *iter) 4058 { 4059 set_current_state(TASK_INTERRUPTIBLE); 4060 /* sleep for 100 msecs, and try again. */ 4061 schedule_timeout(HZ / 10); 4062 } 4063 4064 /* Must be called with trace_types_lock mutex held. */ 4065 static int tracing_wait_pipe(struct file *filp) 4066 { 4067 struct trace_iterator *iter = filp->private_data; 4068 4069 while (trace_empty(iter)) { 4070 4071 if ((filp->f_flags & O_NONBLOCK)) { 4072 return -EAGAIN; 4073 } 4074 4075 mutex_unlock(&iter->mutex); 4076 4077 iter->trace->wait_pipe(iter); 4078 4079 mutex_lock(&iter->mutex); 4080 4081 if (signal_pending(current)) 4082 return -EINTR; 4083 4084 /* 4085 * We block until we read something or until tracing is disabled. 4086 * We keep blocking while tracing is disabled if we have never 4087 * read anything. This allows a user to cat this file, and 4088 * then enable tracing. But after we have read something, 4089 * we give an EOF when tracing is disabled again. 4090 * 4091 * iter->pos will be 0 if we haven't read anything. 4092 */ 4093 if (!tracing_is_on() && iter->pos) 4094 break; 4095 } 4096 4097 return 1; 4098 } 4099 4100 /* 4101 * Consumer reader.
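* Unlike the "trace" file, a read here removes what it returns from the ring buffer. An illustrative session, path assumed under the usual debugfs mount: # cat /sys/kernel/debug/tracing/trace_pipe blocks until events arrive and consumes them as they are read.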
4102 */ 4103 static ssize_t 4104 tracing_read_pipe(struct file *filp, char __user *ubuf, 4105 size_t cnt, loff_t *ppos) 4106 { 4107 struct trace_iterator *iter = filp->private_data; 4108 struct trace_array *tr = iter->tr; 4109 ssize_t sret; 4110 4111 /* return any leftover data */ 4112 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4113 if (sret != -EBUSY) 4114 return sret; 4115 4116 trace_seq_init(&iter->seq); 4117 4118 /* copy the tracer to avoid using a global lock all around */ 4119 mutex_lock(&trace_types_lock); 4120 if (unlikely(iter->trace->name != tr->current_trace->name)) 4121 *iter->trace = *tr->current_trace; 4122 mutex_unlock(&trace_types_lock); 4123 4124 /* 4125 * Avoid more than one consumer on a single file descriptor 4126 * This is just a matter of traces coherency, the ring buffer itself 4127 * is protected. 4128 */ 4129 mutex_lock(&iter->mutex); 4130 if (iter->trace->read) { 4131 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 4132 if (sret) 4133 goto out; 4134 } 4135 4136 waitagain: 4137 sret = tracing_wait_pipe(filp); 4138 if (sret <= 0) 4139 goto out; 4140 4141 /* stop when tracing is finished */ 4142 if (trace_empty(iter)) { 4143 sret = 0; 4144 goto out; 4145 } 4146 4147 if (cnt >= PAGE_SIZE) 4148 cnt = PAGE_SIZE - 1; 4149 4150 /* reset all but tr, trace, and overruns */ 4151 memset(&iter->seq, 0, 4152 sizeof(struct trace_iterator) - 4153 offsetof(struct trace_iterator, seq)); 4154 cpumask_clear(iter->started); 4155 iter->pos = -1; 4156 4157 trace_event_read_lock(); 4158 trace_access_lock(iter->cpu_file); 4159 while (trace_find_next_entry_inc(iter) != NULL) { 4160 enum print_line_t ret; 4161 int len = iter->seq.len; 4162 4163 ret = print_trace_line(iter); 4164 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4165 /* don't print partial lines */ 4166 iter->seq.len = len; 4167 break; 4168 } 4169 if (ret != TRACE_TYPE_NO_CONSUME) 4170 trace_consume(iter); 4171 4172 if (iter->seq.len >= cnt) 4173 break; 4174 4175 /* 4176 * Setting the full flag means we reached the trace_seq buffer 4177 * size and we should leave by partial output condition above. 4178 * One of the trace_seq_* functions is not used properly. 4179 */ 4180 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 4181 iter->ent->type); 4182 } 4183 trace_access_unlock(iter->cpu_file); 4184 trace_event_read_unlock(); 4185 4186 /* Now copy what we have to the user */ 4187 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4188 if (iter->seq.readpos >= iter->seq.len) 4189 trace_seq_init(&iter->seq); 4190 4191 /* 4192 * If there was nothing to send to user, in spite of consuming trace 4193 * entries, go back to wait for more entries. 
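* (trace_seq_to_user() reports -EBUSY when the seq buffer holds nothing left to copy.)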
4194 */ 4195 if (sret == -EBUSY) 4196 goto waitagain; 4197 4198 out: 4199 mutex_unlock(&iter->mutex); 4200 4201 return sret; 4202 } 4203 4204 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 4205 struct pipe_buffer *buf) 4206 { 4207 __free_page(buf->page); 4208 } 4209 4210 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 4211 unsigned int idx) 4212 { 4213 __free_page(spd->pages[idx]); 4214 } 4215 4216 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 4217 .can_merge = 0, 4218 .map = generic_pipe_buf_map, 4219 .unmap = generic_pipe_buf_unmap, 4220 .confirm = generic_pipe_buf_confirm, 4221 .release = tracing_pipe_buf_release, 4222 .steal = generic_pipe_buf_steal, 4223 .get = generic_pipe_buf_get, 4224 }; 4225 4226 static size_t 4227 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 4228 { 4229 size_t count; 4230 int ret; 4231 4232 /* Seq buffer is page-sized, exactly what we need. */ 4233 for (;;) { 4234 count = iter->seq.len; 4235 ret = print_trace_line(iter); 4236 count = iter->seq.len - count; 4237 if (rem < count) { 4238 rem = 0; 4239 iter->seq.len -= count; 4240 break; 4241 } 4242 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4243 iter->seq.len -= count; 4244 break; 4245 } 4246 4247 if (ret != TRACE_TYPE_NO_CONSUME) 4248 trace_consume(iter); 4249 rem -= count; 4250 if (!trace_find_next_entry_inc(iter)) { 4251 rem = 0; 4252 iter->ent = NULL; 4253 break; 4254 } 4255 } 4256 4257 return rem; 4258 } 4259 4260 static ssize_t tracing_splice_read_pipe(struct file *filp, 4261 loff_t *ppos, 4262 struct pipe_inode_info *pipe, 4263 size_t len, 4264 unsigned int flags) 4265 { 4266 struct page *pages_def[PIPE_DEF_BUFFERS]; 4267 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 4268 struct trace_iterator *iter = filp->private_data; 4269 struct splice_pipe_desc spd = { 4270 .pages = pages_def, 4271 .partial = partial_def, 4272 .nr_pages = 0, /* This gets updated below. */ 4273 .nr_pages_max = PIPE_DEF_BUFFERS, 4274 .flags = flags, 4275 .ops = &tracing_pipe_buf_ops, 4276 .spd_release = tracing_spd_release_pipe, 4277 }; 4278 struct trace_array *tr = iter->tr; 4279 ssize_t ret; 4280 size_t rem; 4281 unsigned int i; 4282 4283 if (splice_grow_spd(pipe, &spd)) 4284 return -ENOMEM; 4285 4286 /* copy the tracer to avoid using a global lock all around */ 4287 mutex_lock(&trace_types_lock); 4288 if (unlikely(iter->trace->name != tr->current_trace->name)) 4289 *iter->trace = *tr->current_trace; 4290 mutex_unlock(&trace_types_lock); 4291 4292 mutex_lock(&iter->mutex); 4293 4294 if (iter->trace->splice_read) { 4295 ret = iter->trace->splice_read(iter, filp, 4296 ppos, pipe, len, flags); 4297 if (ret) 4298 goto out_err; 4299 } 4300 4301 ret = tracing_wait_pipe(filp); 4302 if (ret <= 0) 4303 goto out_err; 4304 4305 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 4306 ret = -EFAULT; 4307 goto out_err; 4308 } 4309 4310 trace_event_read_lock(); 4311 trace_access_lock(iter->cpu_file); 4312 4313 /* Fill as many pages as possible. */ 4314 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 4315 spd.pages[i] = alloc_page(GFP_KERNEL); 4316 if (!spd.pages[i]) 4317 break; 4318 4319 rem = tracing_fill_pipe_page(rem, iter); 4320 4321 /* Copy the data into the page, so we can start over. 
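* At most one page is handed over per iteration; the seq buffer is re-initialized below before the next page is filled.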
*/ 4322 ret = trace_seq_to_buffer(&iter->seq, 4323 page_address(spd.pages[i]), 4324 iter->seq.len); 4325 if (ret < 0) { 4326 __free_page(spd.pages[i]); 4327 break; 4328 } 4329 spd.partial[i].offset = 0; 4330 spd.partial[i].len = iter->seq.len; 4331 4332 trace_seq_init(&iter->seq); 4333 } 4334 4335 trace_access_unlock(iter->cpu_file); 4336 trace_event_read_unlock(); 4337 mutex_unlock(&iter->mutex); 4338 4339 spd.nr_pages = i; 4340 4341 ret = splice_to_pipe(pipe, &spd); 4342 out: 4343 splice_shrink_spd(&spd); 4344 return ret; 4345 4346 out_err: 4347 mutex_unlock(&iter->mutex); 4348 goto out; 4349 } 4350 4351 static ssize_t 4352 tracing_entries_read(struct file *filp, char __user *ubuf, 4353 size_t cnt, loff_t *ppos) 4354 { 4355 struct inode *inode = file_inode(filp); 4356 struct trace_array *tr = inode->i_private; 4357 int cpu = tracing_get_cpu(inode); 4358 char buf[64]; 4359 int r = 0; 4360 ssize_t ret; 4361 4362 mutex_lock(&trace_types_lock); 4363 4364 if (cpu == RING_BUFFER_ALL_CPUS) { 4365 int cpu, buf_size_same; 4366 unsigned long size; 4367 4368 size = 0; 4369 buf_size_same = 1; 4370 /* check if all cpu sizes are same */ 4371 for_each_tracing_cpu(cpu) { 4372 /* fill in the size from first enabled cpu */ 4373 if (size == 0) 4374 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; 4375 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { 4376 buf_size_same = 0; 4377 break; 4378 } 4379 } 4380 4381 if (buf_size_same) { 4382 if (!ring_buffer_expanded) 4383 r = sprintf(buf, "%lu (expanded: %lu)\n", 4384 size >> 10, 4385 trace_buf_size >> 10); 4386 else 4387 r = sprintf(buf, "%lu\n", size >> 10); 4388 } else 4389 r = sprintf(buf, "X\n"); 4390 } else 4391 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); 4392 4393 mutex_unlock(&trace_types_lock); 4394 4395 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4396 return ret; 4397 } 4398 4399 static ssize_t 4400 tracing_entries_write(struct file *filp, const char __user *ubuf, 4401 size_t cnt, loff_t *ppos) 4402 { 4403 struct inode *inode = file_inode(filp); 4404 struct trace_array *tr = inode->i_private; 4405 unsigned long val; 4406 int ret; 4407 4408 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4409 if (ret) 4410 return ret; 4411 4412 /* must have at least 1 entry */ 4413 if (!val) 4414 return -EINVAL; 4415 4416 /* value is in KB */ 4417 val <<= 10; 4418 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 4419 if (ret < 0) 4420 return ret; 4421 4422 *ppos += cnt; 4423 4424 return cnt; 4425 } 4426 4427 static ssize_t 4428 tracing_total_entries_read(struct file *filp, char __user *ubuf, 4429 size_t cnt, loff_t *ppos) 4430 { 4431 struct trace_array *tr = filp->private_data; 4432 char buf[64]; 4433 int r, cpu; 4434 unsigned long size = 0, expanded_size = 0; 4435 4436 mutex_lock(&trace_types_lock); 4437 for_each_tracing_cpu(cpu) { 4438 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; 4439 if (!ring_buffer_expanded) 4440 expanded_size += trace_buf_size >> 10; 4441 } 4442 if (ring_buffer_expanded) 4443 r = sprintf(buf, "%lu\n", size); 4444 else 4445 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 4446 mutex_unlock(&trace_types_lock); 4447 4448 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4449 } 4450 4451 static ssize_t 4452 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 4453 size_t cnt, loff_t *ppos) 4454 { 4455 /* 4456 * There is no need to read what the user has written, this function 4457 * is just to make sure 
that there is no error when "echo" is used 4458 */ 4459 4460 *ppos += cnt; 4461 4462 return cnt; 4463 } 4464 4465 static int 4466 tracing_free_buffer_release(struct inode *inode, struct file *filp) 4467 { 4468 struct trace_array *tr = inode->i_private; 4469 4470 /* disable tracing ? */ 4471 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 4472 tracer_tracing_off(tr); 4473 /* resize the ring buffer to 0 */ 4474 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 4475 4476 trace_array_put(tr); 4477 4478 return 0; 4479 } 4480 4481 static ssize_t 4482 tracing_mark_write(struct file *filp, const char __user *ubuf, 4483 size_t cnt, loff_t *fpos) 4484 { 4485 unsigned long addr = (unsigned long)ubuf; 4486 struct trace_array *tr = filp->private_data; 4487 struct ring_buffer_event *event; 4488 struct ring_buffer *buffer; 4489 struct print_entry *entry; 4490 unsigned long irq_flags; 4491 struct page *pages[2]; 4492 void *map_page[2]; 4493 int nr_pages = 1; 4494 ssize_t written; 4495 int offset; 4496 int size; 4497 int len; 4498 int ret; 4499 int i; 4500 4501 if (tracing_disabled) 4502 return -EINVAL; 4503 4504 if (!(trace_flags & TRACE_ITER_MARKERS)) 4505 return -EINVAL; 4506 4507 if (cnt > TRACE_BUF_SIZE) 4508 cnt = TRACE_BUF_SIZE; 4509 4510 /* 4511 * Userspace is injecting traces into the kernel trace buffer. 4512 * We want to be as non intrusive as possible. 4513 * To do so, we do not want to allocate any special buffers 4514 * or take any locks, but instead write the userspace data 4515 * straight into the ring buffer. 4516 * 4517 * First we need to pin the userspace buffer into memory, 4518 * which, most likely it is, because it just referenced it. 4519 * But there's no guarantee that it is. By using get_user_pages_fast() 4520 * and kmap_atomic/kunmap_atomic() we can get access to the 4521 * pages directly. We then write the data directly into the 4522 * ring buffer. 
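* User space reaches this through the "trace_marker" file created in init_tracer_debugfs() below, e.g. (assuming debugfs is mounted in the usual place): echo hello_world > /sys/kernel/debug/tracing/trace_marker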
4523 */ 4524 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 4525 4526 /* check if we cross pages */ 4527 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) 4528 nr_pages = 2; 4529 4530 offset = addr & (PAGE_SIZE - 1); 4531 addr &= PAGE_MASK; 4532 4533 ret = get_user_pages_fast(addr, nr_pages, 0, pages); 4534 if (ret < nr_pages) { 4535 while (--ret >= 0) 4536 put_page(pages[ret]); 4537 written = -EFAULT; 4538 goto out; 4539 } 4540 4541 for (i = 0; i < nr_pages; i++) 4542 map_page[i] = kmap_atomic(pages[i]); 4543 4544 local_save_flags(irq_flags); 4545 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 4546 buffer = tr->trace_buffer.buffer; 4547 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 4548 irq_flags, preempt_count()); 4549 if (!event) { 4550 /* Ring buffer disabled, return as if not open for write */ 4551 written = -EBADF; 4552 goto out_unlock; 4553 } 4554 4555 entry = ring_buffer_event_data(event); 4556 entry->ip = _THIS_IP_; 4557 4558 if (nr_pages == 2) { 4559 len = PAGE_SIZE - offset; 4560 memcpy(&entry->buf, map_page[0] + offset, len); 4561 memcpy(&entry->buf[len], map_page[1], cnt - len); 4562 } else 4563 memcpy(&entry->buf, map_page[0] + offset, cnt); 4564 4565 if (entry->buf[cnt - 1] != '\n') { 4566 entry->buf[cnt] = '\n'; 4567 entry->buf[cnt + 1] = '\0'; 4568 } else 4569 entry->buf[cnt] = '\0'; 4570 4571 __buffer_unlock_commit(buffer, event); 4572 4573 written = cnt; 4574 4575 *fpos += written; 4576 4577 out_unlock: 4578 for (i = 0; i < nr_pages; i++){ 4579 kunmap_atomic(map_page[i]); 4580 put_page(pages[i]); 4581 } 4582 out: 4583 return written; 4584 } 4585 4586 static int tracing_clock_show(struct seq_file *m, void *v) 4587 { 4588 struct trace_array *tr = m->private; 4589 int i; 4590 4591 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 4592 seq_printf(m, 4593 "%s%s%s%s", i ? " " : "", 4594 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 4595 i == tr->clock_id ? "]" : ""); 4596 seq_putc(m, '\n'); 4597 4598 return 0; 4599 } 4600 4601 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4602 size_t cnt, loff_t *fpos) 4603 { 4604 struct seq_file *m = filp->private_data; 4605 struct trace_array *tr = m->private; 4606 char buf[64]; 4607 const char *clockstr; 4608 int i; 4609 4610 if (cnt >= sizeof(buf)) 4611 return -EINVAL; 4612 4613 if (copy_from_user(&buf, ubuf, cnt)) 4614 return -EFAULT; 4615 4616 buf[cnt] = 0; 4617 4618 clockstr = strstrip(buf); 4619 4620 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4621 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4622 break; 4623 } 4624 if (i == ARRAY_SIZE(trace_clocks)) 4625 return -EINVAL; 4626 4627 mutex_lock(&trace_types_lock); 4628 4629 tr->clock_id = i; 4630 4631 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); 4632 4633 /* 4634 * New clock may not be consistent with the previous clock. 4635 * Reset the buffer so that it doesn't have incomparable timestamps. 
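* (e.g. when switching from the per-cpu "local" clock to the "global" clock; a typical write is: echo global > trace_clock)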
4636 */ 4637 tracing_reset_online_cpus(&tr->trace_buffer); 4638 4639 #ifdef CONFIG_TRACER_MAX_TRACE 4640 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) 4641 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 4642 tracing_reset_online_cpus(&tr->max_buffer); 4643 #endif 4644 4645 mutex_unlock(&trace_types_lock); 4646 4647 *fpos += cnt; 4648 4649 return cnt; 4650 } 4651 4652 static int tracing_clock_open(struct inode *inode, struct file *file) 4653 { 4654 struct trace_array *tr = inode->i_private; 4655 int ret; 4656 4657 if (tracing_disabled) 4658 return -ENODEV; 4659 4660 if (trace_array_get(tr)) 4661 return -ENODEV; 4662 4663 ret = single_open(file, tracing_clock_show, inode->i_private); 4664 if (ret < 0) 4665 trace_array_put(tr); 4666 4667 return ret; 4668 } 4669 4670 struct ftrace_buffer_info { 4671 struct trace_iterator iter; 4672 void *spare; 4673 unsigned int read; 4674 }; 4675 4676 #ifdef CONFIG_TRACER_SNAPSHOT 4677 static int tracing_snapshot_open(struct inode *inode, struct file *file) 4678 { 4679 struct trace_array *tr = inode->i_private; 4680 struct trace_iterator *iter; 4681 struct seq_file *m; 4682 int ret = 0; 4683 4684 if (trace_array_get(tr) < 0) 4685 return -ENODEV; 4686 4687 if (file->f_mode & FMODE_READ) { 4688 iter = __tracing_open(inode, file, true); 4689 if (IS_ERR(iter)) 4690 ret = PTR_ERR(iter); 4691 } else { 4692 /* Writes still need the seq_file to hold the private data */ 4693 ret = -ENOMEM; 4694 m = kzalloc(sizeof(*m), GFP_KERNEL); 4695 if (!m) 4696 goto out; 4697 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4698 if (!iter) { 4699 kfree(m); 4700 goto out; 4701 } 4702 ret = 0; 4703 4704 iter->tr = tr; 4705 iter->trace_buffer = &tr->max_buffer; 4706 iter->cpu_file = tracing_get_cpu(inode); 4707 m->private = iter; 4708 file->private_data = m; 4709 } 4710 out: 4711 if (ret < 0) 4712 trace_array_put(tr); 4713 4714 return ret; 4715 } 4716 4717 static ssize_t 4718 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4719 loff_t *ppos) 4720 { 4721 struct seq_file *m = filp->private_data; 4722 struct trace_iterator *iter = m->private; 4723 struct trace_array *tr = iter->tr; 4724 unsigned long val; 4725 int ret; 4726 4727 ret = tracing_update_buffers(); 4728 if (ret < 0) 4729 return ret; 4730 4731 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4732 if (ret) 4733 return ret; 4734 4735 mutex_lock(&trace_types_lock); 4736 4737 if (tr->current_trace->use_max_tr) { 4738 ret = -EBUSY; 4739 goto out; 4740 } 4741 4742 switch (val) { 4743 case 0: 4744 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4745 ret = -EINVAL; 4746 break; 4747 } 4748 if (tr->allocated_snapshot) 4749 free_snapshot(tr); 4750 break; 4751 case 1: 4752 /* Only allow per-cpu swap if the ring buffer supports it */ 4753 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 4754 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4755 ret = -EINVAL; 4756 break; 4757 } 4758 #endif 4759 if (!tr->allocated_snapshot) { 4760 ret = alloc_snapshot(tr); 4761 if (ret < 0) 4762 break; 4763 } 4764 local_irq_disable(); 4765 /* Now, we're going to swap */ 4766 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4767 update_max_tr(tr, current, smp_processor_id()); 4768 else 4769 update_max_tr_single(tr, current, iter->cpu_file); 4770 local_irq_enable(); 4771 break; 4772 default: 4773 if (tr->allocated_snapshot) { 4774 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4775 tracing_reset_online_cpus(&tr->max_buffer); 4776 else 4777 tracing_reset(&tr->max_buffer, iter->cpu_file); 4778 } 4779 break; 4780 } 4781 
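/* On success the whole write was consumed: advance the file position and report the full count back to user space. */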
4782 if (ret >= 0) { 4783 *ppos += cnt; 4784 ret = cnt; 4785 } 4786 out: 4787 mutex_unlock(&trace_types_lock); 4788 return ret; 4789 } 4790 4791 static int tracing_snapshot_release(struct inode *inode, struct file *file) 4792 { 4793 struct seq_file *m = file->private_data; 4794 int ret; 4795 4796 ret = tracing_release(inode, file); 4797 4798 if (file->f_mode & FMODE_READ) 4799 return ret; 4800 4801 /* If write only, the seq_file is just a stub */ 4802 if (m) 4803 kfree(m->private); 4804 kfree(m); 4805 4806 return 0; 4807 } 4808 4809 static int tracing_buffers_open(struct inode *inode, struct file *filp); 4810 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 4811 size_t count, loff_t *ppos); 4812 static int tracing_buffers_release(struct inode *inode, struct file *file); 4813 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 4814 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 4815 4816 static int snapshot_raw_open(struct inode *inode, struct file *filp) 4817 { 4818 struct ftrace_buffer_info *info; 4819 int ret; 4820 4821 ret = tracing_buffers_open(inode, filp); 4822 if (ret < 0) 4823 return ret; 4824 4825 info = filp->private_data; 4826 4827 if (info->iter.trace->use_max_tr) { 4828 tracing_buffers_release(inode, filp); 4829 return -EBUSY; 4830 } 4831 4832 info->iter.snapshot = true; 4833 info->iter.trace_buffer = &info->iter.tr->max_buffer; 4834 4835 return ret; 4836 } 4837 4838 #endif /* CONFIG_TRACER_SNAPSHOT */ 4839 4840 4841 static const struct file_operations tracing_max_lat_fops = { 4842 .open = tracing_open_generic, 4843 .read = tracing_max_lat_read, 4844 .write = tracing_max_lat_write, 4845 .llseek = generic_file_llseek, 4846 }; 4847 4848 static const struct file_operations set_tracer_fops = { 4849 .open = tracing_open_generic, 4850 .read = tracing_set_trace_read, 4851 .write = tracing_set_trace_write, 4852 .llseek = generic_file_llseek, 4853 }; 4854 4855 static const struct file_operations tracing_pipe_fops = { 4856 .open = tracing_open_pipe, 4857 .poll = tracing_poll_pipe, 4858 .read = tracing_read_pipe, 4859 .splice_read = tracing_splice_read_pipe, 4860 .release = tracing_release_pipe, 4861 .llseek = no_llseek, 4862 }; 4863 4864 static const struct file_operations tracing_entries_fops = { 4865 .open = tracing_open_generic_tr, 4866 .read = tracing_entries_read, 4867 .write = tracing_entries_write, 4868 .llseek = generic_file_llseek, 4869 .release = tracing_release_generic_tr, 4870 }; 4871 4872 static const struct file_operations tracing_total_entries_fops = { 4873 .open = tracing_open_generic_tr, 4874 .read = tracing_total_entries_read, 4875 .llseek = generic_file_llseek, 4876 .release = tracing_release_generic_tr, 4877 }; 4878 4879 static const struct file_operations tracing_free_buffer_fops = { 4880 .open = tracing_open_generic_tr, 4881 .write = tracing_free_buffer_write, 4882 .release = tracing_free_buffer_release, 4883 }; 4884 4885 static const struct file_operations tracing_mark_fops = { 4886 .open = tracing_open_generic_tr, 4887 .write = tracing_mark_write, 4888 .llseek = generic_file_llseek, 4889 .release = tracing_release_generic_tr, 4890 }; 4891 4892 static const struct file_operations trace_clock_fops = { 4893 .open = tracing_clock_open, 4894 .read = seq_read, 4895 .llseek = seq_lseek, 4896 .release = tracing_single_release_tr, 4897 .write = tracing_clock_write, 4898 }; 4899 4900 #ifdef CONFIG_TRACER_SNAPSHOT 4901 static const struct file_operations snapshot_fops = { 4902 .open = tracing_snapshot_open, 
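/* reads go through the regular trace iterator, pointed at max_buffer by tracing_snapshot_open() */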
4903 .read = seq_read, 4904 .write = tracing_snapshot_write, 4905 .llseek = tracing_seek, 4906 .release = tracing_snapshot_release, 4907 }; 4908 4909 static const struct file_operations snapshot_raw_fops = { 4910 .open = snapshot_raw_open, 4911 .read = tracing_buffers_read, 4912 .release = tracing_buffers_release, 4913 .splice_read = tracing_buffers_splice_read, 4914 .llseek = no_llseek, 4915 }; 4916 4917 #endif /* CONFIG_TRACER_SNAPSHOT */ 4918 4919 static int tracing_buffers_open(struct inode *inode, struct file *filp) 4920 { 4921 struct trace_array *tr = inode->i_private; 4922 struct ftrace_buffer_info *info; 4923 int ret; 4924 4925 if (tracing_disabled) 4926 return -ENODEV; 4927 4928 if (trace_array_get(tr) < 0) 4929 return -ENODEV; 4930 4931 info = kzalloc(sizeof(*info), GFP_KERNEL); 4932 if (!info) { 4933 trace_array_put(tr); 4934 return -ENOMEM; 4935 } 4936 4937 mutex_lock(&trace_types_lock); 4938 4939 info->iter.tr = tr; 4940 info->iter.cpu_file = tracing_get_cpu(inode); 4941 info->iter.trace = tr->current_trace; 4942 info->iter.trace_buffer = &tr->trace_buffer; 4943 info->spare = NULL; 4944 /* Force reading ring buffer for first read */ 4945 info->read = (unsigned int)-1; 4946 4947 filp->private_data = info; 4948 4949 mutex_unlock(&trace_types_lock); 4950 4951 ret = nonseekable_open(inode, filp); 4952 if (ret < 0) 4953 trace_array_put(tr); 4954 4955 return ret; 4956 } 4957 4958 static unsigned int 4959 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 4960 { 4961 struct ftrace_buffer_info *info = filp->private_data; 4962 struct trace_iterator *iter = &info->iter; 4963 4964 return trace_poll(iter, filp, poll_table); 4965 } 4966 4967 static ssize_t 4968 tracing_buffers_read(struct file *filp, char __user *ubuf, 4969 size_t count, loff_t *ppos) 4970 { 4971 struct ftrace_buffer_info *info = filp->private_data; 4972 struct trace_iterator *iter = &info->iter; 4973 ssize_t ret; 4974 ssize_t size; 4975 4976 if (!count) 4977 return 0; 4978 4979 mutex_lock(&trace_types_lock); 4980 4981 #ifdef CONFIG_TRACER_MAX_TRACE 4982 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 4983 size = -EBUSY; 4984 goto out_unlock; 4985 } 4986 #endif 4987 4988 if (!info->spare) 4989 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, 4990 iter->cpu_file); 4991 size = -ENOMEM; 4992 if (!info->spare) 4993 goto out_unlock; 4994 4995 /* Do we have previous read data to read? 
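* A previous call may have copied only part of the spare page to user space; if so, resume from info->read rather than pulling a fresh page.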
*/ 4996 if (info->read < PAGE_SIZE) 4997 goto read; 4998 4999 again: 5000 trace_access_lock(iter->cpu_file); 5001 ret = ring_buffer_read_page(iter->trace_buffer->buffer, 5002 &info->spare, 5003 count, 5004 iter->cpu_file, 0); 5005 trace_access_unlock(iter->cpu_file); 5006 5007 if (ret < 0) { 5008 if (trace_empty(iter)) { 5009 if ((filp->f_flags & O_NONBLOCK)) { 5010 size = -EAGAIN; 5011 goto out_unlock; 5012 } 5013 mutex_unlock(&trace_types_lock); 5014 iter->trace->wait_pipe(iter); 5015 mutex_lock(&trace_types_lock); 5016 if (signal_pending(current)) { 5017 size = -EINTR; 5018 goto out_unlock; 5019 } 5020 goto again; 5021 } 5022 size = 0; 5023 goto out_unlock; 5024 } 5025 5026 info->read = 0; 5027 read: 5028 size = PAGE_SIZE - info->read; 5029 if (size > count) 5030 size = count; 5031 5032 ret = copy_to_user(ubuf, info->spare + info->read, size); 5033 if (ret == size) { 5034 size = -EFAULT; 5035 goto out_unlock; 5036 } 5037 size -= ret; 5038 5039 *ppos += size; 5040 info->read += size; 5041 5042 out_unlock: 5043 mutex_unlock(&trace_types_lock); 5044 5045 return size; 5046 } 5047 5048 static int tracing_buffers_release(struct inode *inode, struct file *file) 5049 { 5050 struct ftrace_buffer_info *info = file->private_data; 5051 struct trace_iterator *iter = &info->iter; 5052 5053 mutex_lock(&trace_types_lock); 5054 5055 __trace_array_put(iter->tr); 5056 5057 if (info->spare) 5058 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); 5059 kfree(info); 5060 5061 mutex_unlock(&trace_types_lock); 5062 5063 return 0; 5064 } 5065 5066 struct buffer_ref { 5067 struct ring_buffer *buffer; 5068 void *page; 5069 int ref; 5070 }; 5071 5072 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 5073 struct pipe_buffer *buf) 5074 { 5075 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5076 5077 if (--ref->ref) 5078 return; 5079 5080 ring_buffer_free_read_page(ref->buffer, ref->page); 5081 kfree(ref); 5082 buf->private = 0; 5083 } 5084 5085 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 5086 struct pipe_buffer *buf) 5087 { 5088 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5089 5090 ref->ref++; 5091 } 5092 5093 /* Pipe buffer operations for a buffer. */ 5094 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 5095 .can_merge = 0, 5096 .map = generic_pipe_buf_map, 5097 .unmap = generic_pipe_buf_unmap, 5098 .confirm = generic_pipe_buf_confirm, 5099 .release = buffer_pipe_buf_release, 5100 .steal = generic_pipe_buf_steal, 5101 .get = buffer_pipe_buf_get, 5102 }; 5103 5104 /* 5105 * Callback from splice_to_pipe(), if we need to release some pages 5106 * at the end of the spd in case we error'ed out in filling the pipe. 
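* Each page handed to the pipe carries a buffer_ref; dropping the last reference returns the page to the ring buffer.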
5107 */ 5108 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 5109 { 5110 struct buffer_ref *ref = 5111 (struct buffer_ref *)spd->partial[i].private; 5112 5113 if (--ref->ref) 5114 return; 5115 5116 ring_buffer_free_read_page(ref->buffer, ref->page); 5117 kfree(ref); 5118 spd->partial[i].private = 0; 5119 } 5120 5121 static ssize_t 5122 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 5123 struct pipe_inode_info *pipe, size_t len, 5124 unsigned int flags) 5125 { 5126 struct ftrace_buffer_info *info = file->private_data; 5127 struct trace_iterator *iter = &info->iter; 5128 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 5129 struct page *pages_def[PIPE_DEF_BUFFERS]; 5130 struct splice_pipe_desc spd = { 5131 .pages = pages_def, 5132 .partial = partial_def, 5133 .nr_pages_max = PIPE_DEF_BUFFERS, 5134 .flags = flags, 5135 .ops = &buffer_pipe_buf_ops, 5136 .spd_release = buffer_spd_release, 5137 }; 5138 struct buffer_ref *ref; 5139 int entries, size, i; 5140 ssize_t ret; 5141 5142 mutex_lock(&trace_types_lock); 5143 5144 #ifdef CONFIG_TRACER_MAX_TRACE 5145 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 5146 ret = -EBUSY; 5147 goto out; 5148 } 5149 #endif 5150 5151 if (splice_grow_spd(pipe, &spd)) { 5152 ret = -ENOMEM; 5153 goto out; 5154 } 5155 5156 if (*ppos & (PAGE_SIZE - 1)) { 5157 ret = -EINVAL; 5158 goto out; 5159 } 5160 5161 if (len & (PAGE_SIZE - 1)) { 5162 if (len < PAGE_SIZE) { 5163 ret = -EINVAL; 5164 goto out; 5165 } 5166 len &= PAGE_MASK; 5167 } 5168 5169 again: 5170 trace_access_lock(iter->cpu_file); 5171 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5172 5173 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { 5174 struct page *page; 5175 int r; 5176 5177 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 5178 if (!ref) 5179 break; 5180 5181 ref->ref = 1; 5182 ref->buffer = iter->trace_buffer->buffer; 5183 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 5184 if (!ref->page) { 5185 kfree(ref); 5186 break; 5187 } 5188 5189 r = ring_buffer_read_page(ref->buffer, &ref->page, 5190 len, iter->cpu_file, 1); 5191 if (r < 0) { 5192 ring_buffer_free_read_page(ref->buffer, ref->page); 5193 kfree(ref); 5194 break; 5195 } 5196 5197 /* 5198 * zero out any left over data, this is going to 5199 * user land. 5200 */ 5201 size = ring_buffer_page_len(ref->page); 5202 if (size < PAGE_SIZE) 5203 memset(ref->page + size, 0, PAGE_SIZE - size); 5204 5205 page = virt_to_page(ref->page); 5206 5207 spd.pages[i] = page; 5208 spd.partial[i].len = PAGE_SIZE; 5209 spd.partial[i].offset = 0; 5210 spd.partial[i].private = (unsigned long)ref; 5211 spd.nr_pages++; 5212 *ppos += PAGE_SIZE; 5213 5214 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5215 } 5216 5217 trace_access_unlock(iter->cpu_file); 5218 spd.nr_pages = i; 5219 5220 /* did we read anything? 
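* If not, return -EAGAIN to non-blocking readers; otherwise wait for data and retry.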
*/ 5221 if (!spd.nr_pages) { 5222 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { 5223 ret = -EAGAIN; 5224 goto out; 5225 } 5226 mutex_unlock(&trace_types_lock); 5227 iter->trace->wait_pipe(iter); 5228 mutex_lock(&trace_types_lock); 5229 if (signal_pending(current)) { 5230 ret = -EINTR; 5231 goto out; 5232 } 5233 goto again; 5234 } 5235 5236 ret = splice_to_pipe(pipe, &spd); 5237 splice_shrink_spd(&spd); 5238 out: 5239 mutex_unlock(&trace_types_lock); 5240 5241 return ret; 5242 } 5243 5244 static const struct file_operations tracing_buffers_fops = { 5245 .open = tracing_buffers_open, 5246 .read = tracing_buffers_read, 5247 .poll = tracing_buffers_poll, 5248 .release = tracing_buffers_release, 5249 .splice_read = tracing_buffers_splice_read, 5250 .llseek = no_llseek, 5251 }; 5252 5253 static ssize_t 5254 tracing_stats_read(struct file *filp, char __user *ubuf, 5255 size_t count, loff_t *ppos) 5256 { 5257 struct inode *inode = file_inode(filp); 5258 struct trace_array *tr = inode->i_private; 5259 struct trace_buffer *trace_buf = &tr->trace_buffer; 5260 int cpu = tracing_get_cpu(inode); 5261 struct trace_seq *s; 5262 unsigned long cnt; 5263 unsigned long long t; 5264 unsigned long usec_rem; 5265 5266 s = kmalloc(sizeof(*s), GFP_KERNEL); 5267 if (!s) 5268 return -ENOMEM; 5269 5270 trace_seq_init(s); 5271 5272 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 5273 trace_seq_printf(s, "entries: %ld\n", cnt); 5274 5275 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 5276 trace_seq_printf(s, "overrun: %ld\n", cnt); 5277 5278 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 5279 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 5280 5281 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 5282 trace_seq_printf(s, "bytes: %ld\n", cnt); 5283 5284 if (trace_clocks[tr->clock_id].in_ns) { 5285 /* local or global for trace_clock */ 5286 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5287 usec_rem = do_div(t, USEC_PER_SEC); 5288 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 5289 t, usec_rem); 5290 5291 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5292 usec_rem = do_div(t, USEC_PER_SEC); 5293 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 5294 } else { 5295 /* counter or tsc mode for trace_clock */ 5296 trace_seq_printf(s, "oldest event ts: %llu\n", 5297 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5298 5299 trace_seq_printf(s, "now ts: %llu\n", 5300 ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5301 } 5302 5303 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 5304 trace_seq_printf(s, "dropped events: %ld\n", cnt); 5305 5306 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 5307 trace_seq_printf(s, "read events: %ld\n", cnt); 5308 5309 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 5310 5311 kfree(s); 5312 5313 return count; 5314 } 5315 5316 static const struct file_operations tracing_stats_fops = { 5317 .open = tracing_open_generic_tr, 5318 .read = tracing_stats_read, 5319 .llseek = generic_file_llseek, 5320 .release = tracing_release_generic_tr, 5321 }; 5322 5323 #ifdef CONFIG_DYNAMIC_FTRACE 5324 5325 int __weak ftrace_arch_read_dyn_info(char *buf, int size) 5326 { 5327 return 0; 5328 } 5329 5330 static ssize_t 5331 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 5332 size_t cnt, loff_t *ppos) 5333 { 5334 static char ftrace_dyn_info_buffer[1024]; 5335 static DEFINE_MUTEX(dyn_info_mutex); 5336 unsigned long *p = filp->private_data; 
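/* private_data is &ftrace_update_tot_cnt, wired up for "dyn_ftrace_total_info" in tracer_init_debugfs() below */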
5337 char *buf = ftrace_dyn_info_buffer; 5338 int size = ARRAY_SIZE(ftrace_dyn_info_buffer); 5339 int r; 5340 5341 mutex_lock(&dyn_info_mutex); 5342 r = sprintf(buf, "%ld ", *p); 5343 5344 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); 5345 buf[r++] = '\n'; 5346 5347 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5348 5349 mutex_unlock(&dyn_info_mutex); 5350 5351 return r; 5352 } 5353 5354 static const struct file_operations tracing_dyn_info_fops = { 5355 .open = tracing_open_generic, 5356 .read = tracing_read_dyn_info, 5357 .llseek = generic_file_llseek, 5358 }; 5359 #endif /* CONFIG_DYNAMIC_FTRACE */ 5360 5361 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 5362 static void 5363 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5364 { 5365 tracing_snapshot(); 5366 } 5367 5368 static void 5369 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5370 { 5371 unsigned long *count = (long *)data; 5372 5373 if (!*count) 5374 return; 5375 5376 if (*count != -1) 5377 (*count)--; 5378 5379 tracing_snapshot(); 5380 } 5381 5382 static int 5383 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 5384 struct ftrace_probe_ops *ops, void *data) 5385 { 5386 long count = (long)data; 5387 5388 seq_printf(m, "%ps:", (void *)ip); 5389 5390 seq_printf(m, "snapshot"); 5391 5392 if (count == -1) 5393 seq_printf(m, ":unlimited\n"); 5394 else 5395 seq_printf(m, ":count=%ld\n", count); 5396 5397 return 0; 5398 } 5399 5400 static struct ftrace_probe_ops snapshot_probe_ops = { 5401 .func = ftrace_snapshot, 5402 .print = ftrace_snapshot_print, 5403 }; 5404 5405 static struct ftrace_probe_ops snapshot_count_probe_ops = { 5406 .func = ftrace_count_snapshot, 5407 .print = ftrace_snapshot_print, 5408 }; 5409 5410 static int 5411 ftrace_trace_snapshot_callback(struct ftrace_hash *hash, 5412 char *glob, char *cmd, char *param, int enable) 5413 { 5414 struct ftrace_probe_ops *ops; 5415 void *count = (void *)-1; 5416 char *number; 5417 int ret; 5418 5419 /* hash funcs only work with set_ftrace_filter */ 5420 if (!enable) 5421 return -EINVAL; 5422 5423 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 5424 5425 if (glob[0] == '!') { 5426 unregister_ftrace_function_probe_func(glob+1, ops); 5427 return 0; 5428 } 5429 5430 if (!param) 5431 goto out_reg; 5432 5433 number = strsep(&param, ":"); 5434 5435 if (!strlen(number)) 5436 goto out_reg; 5437 5438 /* 5439 * We use the callback data field (which is a pointer) 5440 * as our counter. 5441 */ 5442 ret = kstrtoul(number, 0, (unsigned long *)&count); 5443 if (ret) 5444 return ret; 5445 5446 out_reg: 5447 ret = register_ftrace_function_probe(glob, ops, count); 5448 5449 if (ret >= 0) 5450 alloc_snapshot(&global_trace); 5451 5452 return ret < 0 ?
ret : 0; 5453 } 5454 5455 static struct ftrace_func_command ftrace_snapshot_cmd = { 5456 .name = "snapshot", 5457 .func = ftrace_trace_snapshot_callback, 5458 }; 5459 5460 static int register_snapshot_cmd(void) 5461 { 5462 return register_ftrace_command(&ftrace_snapshot_cmd); 5463 } 5464 #else 5465 static inline int register_snapshot_cmd(void) { return 0; } 5466 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 5467 5468 struct dentry *tracing_init_dentry_tr(struct trace_array *tr) 5469 { 5470 if (tr->dir) 5471 return tr->dir; 5472 5473 if (!debugfs_initialized()) 5474 return NULL; 5475 5476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 5477 tr->dir = debugfs_create_dir("tracing", NULL); 5478 5479 if (!tr->dir) 5480 pr_warn_once("Could not create debugfs directory 'tracing'\n"); 5481 5482 return tr->dir; 5483 } 5484 5485 struct dentry *tracing_init_dentry(void) 5486 { 5487 return tracing_init_dentry_tr(&global_trace); 5488 } 5489 5490 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 5491 { 5492 struct dentry *d_tracer; 5493 5494 if (tr->percpu_dir) 5495 return tr->percpu_dir; 5496 5497 d_tracer = tracing_init_dentry_tr(tr); 5498 if (!d_tracer) 5499 return NULL; 5500 5501 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer); 5502 5503 WARN_ONCE(!tr->percpu_dir, 5504 "Could not create debugfs directory 'per_cpu/%d'\n", cpu); 5505 5506 return tr->percpu_dir; 5507 } 5508 5509 static struct dentry * 5510 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 5511 void *data, long cpu, const struct file_operations *fops) 5512 { 5513 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 5514 5515 if (ret) /* See tracing_get_cpu() */ 5516 ret->d_inode->i_cdev = (void *)(cpu + 1); 5517 return ret; 5518 } 5519 5520 static void 5521 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) 5522 { 5523 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 5524 struct dentry *d_cpu; 5525 char cpu_dir[30]; /* 30 characters should be more than enough */ 5526 5527 if (!d_percpu) 5528 return; 5529 5530 snprintf(cpu_dir, 30, "cpu%ld", cpu); 5531 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 5532 if (!d_cpu) { 5533 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); 5534 return; 5535 } 5536 5537 /* per cpu trace_pipe */ 5538 trace_create_cpu_file("trace_pipe", 0444, d_cpu, 5539 tr, cpu, &tracing_pipe_fops); 5540 5541 /* per cpu trace */ 5542 trace_create_cpu_file("trace", 0644, d_cpu, 5543 tr, cpu, &tracing_fops); 5544 5545 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, 5546 tr, cpu, &tracing_buffers_fops); 5547 5548 trace_create_cpu_file("stats", 0444, d_cpu, 5549 tr, cpu, &tracing_stats_fops); 5550 5551 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, 5552 tr, cpu, &tracing_entries_fops); 5553 5554 #ifdef CONFIG_TRACER_SNAPSHOT 5555 trace_create_cpu_file("snapshot", 0644, d_cpu, 5556 tr, cpu, &snapshot_fops); 5557 5558 trace_create_cpu_file("snapshot_raw", 0444, d_cpu, 5559 tr, cpu, &snapshot_raw_fops); 5560 #endif 5561 } 5562 5563 #ifdef CONFIG_FTRACE_SELFTEST 5564 /* Let selftest have access to static functions in this file */ 5565 #include "trace_selftest.c" 5566 #endif 5567 5568 struct trace_option_dentry { 5569 struct tracer_opt *opt; 5570 struct tracer_flags *flags; 5571 struct trace_array *tr; 5572 struct dentry *entry; 5573 }; 5574 5575 static ssize_t 5576 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 5577 loff_t *ppos) 5578 { 5579 struct 
trace_option_dentry *topt = filp->private_data; 5580 char *buf; 5581 5582 if (topt->flags->val & topt->opt->bit) 5583 buf = "1\n"; 5584 else 5585 buf = "0\n"; 5586 5587 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5588 } 5589 5590 static ssize_t 5591 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 5592 loff_t *ppos) 5593 { 5594 struct trace_option_dentry *topt = filp->private_data; 5595 unsigned long val; 5596 int ret; 5597 5598 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5599 if (ret) 5600 return ret; 5601 5602 if (val != 0 && val != 1) 5603 return -EINVAL; 5604 5605 if (!!(topt->flags->val & topt->opt->bit) != val) { 5606 mutex_lock(&trace_types_lock); 5607 ret = __set_tracer_option(topt->tr->current_trace, topt->flags, 5608 topt->opt, !val); 5609 mutex_unlock(&trace_types_lock); 5610 if (ret) 5611 return ret; 5612 } 5613 5614 *ppos += cnt; 5615 5616 return cnt; 5617 } 5618 5619 5620 static const struct file_operations trace_options_fops = { 5621 .open = tracing_open_generic, 5622 .read = trace_options_read, 5623 .write = trace_options_write, 5624 .llseek = generic_file_llseek, 5625 }; 5626 5627 static ssize_t 5628 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 5629 loff_t *ppos) 5630 { 5631 long index = (long)filp->private_data; 5632 char *buf; 5633 5634 if (trace_flags & (1 << index)) 5635 buf = "1\n"; 5636 else 5637 buf = "0\n"; 5638 5639 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5640 } 5641 5642 static ssize_t 5643 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 5644 loff_t *ppos) 5645 { 5646 struct trace_array *tr = &global_trace; 5647 long index = (long)filp->private_data; 5648 unsigned long val; 5649 int ret; 5650 5651 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5652 if (ret) 5653 return ret; 5654 5655 if (val != 0 && val != 1) 5656 return -EINVAL; 5657 5658 mutex_lock(&trace_types_lock); 5659 ret = set_tracer_flag(tr, 1 << index, val); 5660 mutex_unlock(&trace_types_lock); 5661 5662 if (ret < 0) 5663 return ret; 5664 5665 *ppos += cnt; 5666 5667 return cnt; 5668 } 5669 5670 static const struct file_operations trace_options_core_fops = { 5671 .open = tracing_open_generic, 5672 .read = trace_options_core_read, 5673 .write = trace_options_core_write, 5674 .llseek = generic_file_llseek, 5675 }; 5676 5677 struct dentry *trace_create_file(const char *name, 5678 umode_t mode, 5679 struct dentry *parent, 5680 void *data, 5681 const struct file_operations *fops) 5682 { 5683 struct dentry *ret; 5684 5685 ret = debugfs_create_file(name, mode, parent, data, fops); 5686 if (!ret) 5687 pr_warning("Could not create debugfs '%s' entry\n", name); 5688 5689 return ret; 5690 } 5691 5692 5693 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 5694 { 5695 struct dentry *d_tracer; 5696 5697 if (tr->options) 5698 return tr->options; 5699 5700 d_tracer = tracing_init_dentry_tr(tr); 5701 if (!d_tracer) 5702 return NULL; 5703 5704 tr->options = debugfs_create_dir("options", d_tracer); 5705 if (!tr->options) { 5706 pr_warning("Could not create debugfs directory 'options'\n"); 5707 return NULL; 5708 } 5709 5710 return tr->options; 5711 } 5712 5713 static void 5714 create_trace_option_file(struct trace_array *tr, 5715 struct trace_option_dentry *topt, 5716 struct tracer_flags *flags, 5717 struct tracer_opt *opt) 5718 { 5719 struct dentry *t_options; 5720 5721 t_options = trace_options_init_dentry(tr); 5722 if (!t_options) 5723 return; 5724 5725 topt->flags = flags; 
5726 topt->opt = opt; 5727 topt->tr = tr; 5728 5729 topt->entry = trace_create_file(opt->name, 0644, t_options, topt, 5730 &trace_options_fops); 5731 5732 } 5733 5734 static struct trace_option_dentry * 5735 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 5736 { 5737 struct trace_option_dentry *topts; 5738 struct tracer_flags *flags; 5739 struct tracer_opt *opts; 5740 int cnt; 5741 5742 if (!tracer) 5743 return NULL; 5744 5745 flags = tracer->flags; 5746 5747 if (!flags || !flags->opts) 5748 return NULL; 5749 5750 opts = flags->opts; 5751 5752 for (cnt = 0; opts[cnt].name; cnt++) 5753 ; 5754 5755 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 5756 if (!topts) 5757 return NULL; 5758 5759 for (cnt = 0; opts[cnt].name; cnt++) 5760 create_trace_option_file(tr, &topts[cnt], flags, 5761 &opts[cnt]); 5762 5763 return topts; 5764 } 5765 5766 static void 5767 destroy_trace_option_files(struct trace_option_dentry *topts) 5768 { 5769 int cnt; 5770 5771 if (!topts) 5772 return; 5773 5774 for (cnt = 0; topts[cnt].opt; cnt++) { 5775 if (topts[cnt].entry) 5776 debugfs_remove(topts[cnt].entry); 5777 } 5778 5779 kfree(topts); 5780 } 5781 5782 static struct dentry * 5783 create_trace_option_core_file(struct trace_array *tr, 5784 const char *option, long index) 5785 { 5786 struct dentry *t_options; 5787 5788 t_options = trace_options_init_dentry(tr); 5789 if (!t_options) 5790 return NULL; 5791 5792 return trace_create_file(option, 0644, t_options, (void *)index, 5793 &trace_options_core_fops); 5794 } 5795 5796 static __init void create_trace_options_dir(struct trace_array *tr) 5797 { 5798 struct dentry *t_options; 5799 int i; 5800 5801 t_options = trace_options_init_dentry(tr); 5802 if (!t_options) 5803 return; 5804 5805 for (i = 0; trace_options[i]; i++) 5806 create_trace_option_core_file(tr, trace_options[i], i); 5807 } 5808 5809 static ssize_t 5810 rb_simple_read(struct file *filp, char __user *ubuf, 5811 size_t cnt, loff_t *ppos) 5812 { 5813 struct trace_array *tr = filp->private_data; 5814 char buf[64]; 5815 int r; 5816 5817 r = tracer_tracing_is_on(tr); 5818 r = sprintf(buf, "%d\n", r); 5819 5820 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5821 } 5822 5823 static ssize_t 5824 rb_simple_write(struct file *filp, const char __user *ubuf, 5825 size_t cnt, loff_t *ppos) 5826 { 5827 struct trace_array *tr = filp->private_data; 5828 struct ring_buffer *buffer = tr->trace_buffer.buffer; 5829 unsigned long val; 5830 int ret; 5831 5832 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5833 if (ret) 5834 return ret; 5835 5836 if (buffer) { 5837 mutex_lock(&trace_types_lock); 5838 if (val) { 5839 tracer_tracing_on(tr); 5840 if (tr->current_trace->start) 5841 tr->current_trace->start(tr); 5842 } else { 5843 tracer_tracing_off(tr); 5844 if (tr->current_trace->stop) 5845 tr->current_trace->stop(tr); 5846 } 5847 mutex_unlock(&trace_types_lock); 5848 } 5849 5850 (*ppos)++; 5851 5852 return cnt; 5853 } 5854 5855 static const struct file_operations rb_simple_fops = { 5856 .open = tracing_open_generic_tr, 5857 .read = rb_simple_read, 5858 .write = rb_simple_write, 5859 .release = tracing_release_generic_tr, 5860 .llseek = default_llseek, 5861 }; 5862 5863 struct dentry *trace_instance_dir; 5864 5865 static void 5866 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); 5867 5868 static int 5869 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) 5870 { 5871 enum ring_buffer_flags rb_flags; 5872 5873 rb_flags = trace_flags & 
TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 5874 5875 buf->buffer = ring_buffer_alloc(size, rb_flags); 5876 if (!buf->buffer) 5877 return -ENOMEM; 5878 5879 buf->data = alloc_percpu(struct trace_array_cpu); 5880 if (!buf->data) { 5881 ring_buffer_free(buf->buffer); 5882 return -ENOMEM; 5883 } 5884 5885 /* Allocate the first page for all buffers */ 5886 set_buffer_entries(&tr->trace_buffer, 5887 ring_buffer_size(tr->trace_buffer.buffer, 0)); 5888 5889 return 0; 5890 } 5891 5892 static int allocate_trace_buffers(struct trace_array *tr, int size) 5893 { 5894 int ret; 5895 5896 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); 5897 if (ret) 5898 return ret; 5899 5900 #ifdef CONFIG_TRACER_MAX_TRACE 5901 ret = allocate_trace_buffer(tr, &tr->max_buffer, 5902 allocate_snapshot ? size : 1); 5903 if (WARN_ON(ret)) { 5904 ring_buffer_free(tr->trace_buffer.buffer); 5905 free_percpu(tr->trace_buffer.data); 5906 return -ENOMEM; 5907 } 5908 tr->allocated_snapshot = allocate_snapshot; 5909 5910 /* 5911 * Only the top level trace array gets its snapshot allocated 5912 * from the kernel command line. 5913 */ 5914 allocate_snapshot = false; 5915 #endif 5916 return 0; 5917 } 5918 5919 static int new_instance_create(const char *name) 5920 { 5921 struct trace_array *tr; 5922 int ret; 5923 5924 mutex_lock(&trace_types_lock); 5925 5926 ret = -EEXIST; 5927 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5928 if (tr->name && strcmp(tr->name, name) == 0) 5929 goto out_unlock; 5930 } 5931 5932 ret = -ENOMEM; 5933 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 5934 if (!tr) 5935 goto out_unlock; 5936 5937 tr->name = kstrdup(name, GFP_KERNEL); 5938 if (!tr->name) 5939 goto out_free_tr; 5940 5941 raw_spin_lock_init(&tr->start_lock); 5942 5943 tr->current_trace = &nop_trace; 5944 5945 INIT_LIST_HEAD(&tr->systems); 5946 INIT_LIST_HEAD(&tr->events); 5947 5948 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 5949 goto out_free_tr; 5950 5951 tr->dir = debugfs_create_dir(name, trace_instance_dir); 5952 if (!tr->dir) 5953 goto out_free_tr; 5954 5955 ret = event_trace_add_tracer(tr->dir, tr); 5956 if (ret) { 5957 debugfs_remove_recursive(tr->dir); 5958 goto out_free_tr; 5959 } 5960 5961 init_tracer_debugfs(tr, tr->dir); 5962 5963 list_add(&tr->list, &ftrace_trace_arrays); 5964 5965 mutex_unlock(&trace_types_lock); 5966 5967 return 0; 5968 5969 out_free_tr: 5970 if (tr->trace_buffer.buffer) 5971 ring_buffer_free(tr->trace_buffer.buffer); 5972 kfree(tr->name); 5973 kfree(tr); 5974 5975 out_unlock: 5976 mutex_unlock(&trace_types_lock); 5977 5978 return ret; 5979 5980 } 5981 5982 static int instance_delete(const char *name) 5983 { 5984 struct trace_array *tr; 5985 int found = 0; 5986 int ret; 5987 5988 mutex_lock(&trace_types_lock); 5989 5990 ret = -ENODEV; 5991 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5992 if (tr->name && strcmp(tr->name, name) == 0) { 5993 found = 1; 5994 break; 5995 } 5996 } 5997 if (!found) 5998 goto out_unlock; 5999 6000 ret = -EBUSY; 6001 if (tr->ref) 6002 goto out_unlock; 6003 6004 list_del(&tr->list); 6005 6006 event_trace_del_tracer(tr); 6007 debugfs_remove_recursive(tr->dir); 6008 free_percpu(tr->trace_buffer.data); 6009 ring_buffer_free(tr->trace_buffer.buffer); 6010 6011 kfree(tr->name); 6012 kfree(tr); 6013 6014 ret = 0; 6015 6016 out_unlock: 6017 mutex_unlock(&trace_types_lock); 6018 6019 return ret; 6020 } 6021 6022 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode) 6023 { 6024 struct dentry *parent; 6025 int ret; 6026 6027 /* Paranoid: Make 
sure the parent is the "instances" directory */ 6028 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6029 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6030 return -ENOENT; 6031 6032 /* 6033 * The inode mutex is locked, but debugfs_create_dir() will also 6034 * take the mutex. As the instances directory can not be destroyed 6035 * or changed in any other way, it is safe to unlock it, and 6036 * let the dentry try. If two users try to make the same dir at 6037 * the same time, then the new_instance_create() will determine the 6038 * winner. 6039 */ 6040 mutex_unlock(&inode->i_mutex); 6041 6042 ret = new_instance_create(dentry->d_iname); 6043 6044 mutex_lock(&inode->i_mutex); 6045 6046 return ret; 6047 } 6048 6049 static int instance_rmdir(struct inode *inode, struct dentry *dentry) 6050 { 6051 struct dentry *parent; 6052 int ret; 6053 6054 /* Paranoid: Make sure the parent is the "instances" directory */ 6055 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6056 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6057 return -ENOENT; 6058 6059 /* The caller did a dget() on dentry */ 6060 mutex_unlock(&dentry->d_inode->i_mutex); 6061 6062 /* 6063 * The inode mutex is locked, but debugfs_create_dir() will also 6064 * take the mutex. As the instances directory can not be destroyed 6065 * or changed in any other way, it is safe to unlock it, and 6066 * let the dentry try. If two users try to make the same dir at 6067 * the same time, then the instance_delete() will determine the 6068 * winner. 6069 */ 6070 mutex_unlock(&inode->i_mutex); 6071 6072 ret = instance_delete(dentry->d_iname); 6073 6074 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); 6075 mutex_lock(&dentry->d_inode->i_mutex); 6076 6077 return ret; 6078 } 6079 6080 static const struct inode_operations instance_dir_inode_operations = { 6081 .lookup = simple_lookup, 6082 .mkdir = instance_mkdir, 6083 .rmdir = instance_rmdir, 6084 }; 6085 6086 static __init void create_trace_instances(struct dentry *d_tracer) 6087 { 6088 trace_instance_dir = debugfs_create_dir("instances", d_tracer); 6089 if (WARN_ON(!trace_instance_dir)) 6090 return; 6091 6092 /* Hijack the dir inode operations, to allow mkdir */ 6093 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations; 6094 } 6095 6096 static void 6097 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) 6098 { 6099 int cpu; 6100 6101 trace_create_file("trace_options", 0644, d_tracer, 6102 tr, &tracing_iter_fops); 6103 6104 trace_create_file("trace", 0644, d_tracer, 6105 tr, &tracing_fops); 6106 6107 trace_create_file("trace_pipe", 0444, d_tracer, 6108 tr, &tracing_pipe_fops); 6109 6110 trace_create_file("buffer_size_kb", 0644, d_tracer, 6111 tr, &tracing_entries_fops); 6112 6113 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 6114 tr, &tracing_total_entries_fops); 6115 6116 trace_create_file("free_buffer", 0200, d_tracer, 6117 tr, &tracing_free_buffer_fops); 6118 6119 trace_create_file("trace_marker", 0220, d_tracer, 6120 tr, &tracing_mark_fops); 6121 6122 trace_create_file("trace_clock", 0644, d_tracer, tr, 6123 &trace_clock_fops); 6124 6125 trace_create_file("tracing_on", 0644, d_tracer, 6126 tr, &rb_simple_fops); 6127 6128 #ifdef CONFIG_TRACER_SNAPSHOT 6129 trace_create_file("snapshot", 0644, d_tracer, 6130 tr, &snapshot_fops); 6131 #endif 6132 6133 for_each_tracing_cpu(cpu) 6134 tracing_init_debugfs_percpu(tr, cpu); 6135 6136 } 6137 6138 static __init int tracer_init_debugfs(void) 6139 { 6140 struct dentry 
*d_tracer; 6141 6142 trace_access_lock_init(); 6143 6144 d_tracer = tracing_init_dentry(); 6145 if (!d_tracer) 6146 return 0; 6147 6148 init_tracer_debugfs(&global_trace, d_tracer); 6149 6150 trace_create_file("tracing_cpumask", 0644, d_tracer, 6151 &global_trace, &tracing_cpumask_fops); 6152 6153 trace_create_file("available_tracers", 0444, d_tracer, 6154 &global_trace, &show_traces_fops); 6155 6156 trace_create_file("current_tracer", 0644, d_tracer, 6157 &global_trace, &set_tracer_fops); 6158 6159 #ifdef CONFIG_TRACER_MAX_TRACE 6160 trace_create_file("tracing_max_latency", 0644, d_tracer, 6161 &tracing_max_latency, &tracing_max_lat_fops); 6162 #endif 6163 6164 trace_create_file("tracing_thresh", 0644, d_tracer, 6165 &tracing_thresh, &tracing_max_lat_fops); 6166 6167 trace_create_file("README", 0444, d_tracer, 6168 NULL, &tracing_readme_fops); 6169 6170 trace_create_file("saved_cmdlines", 0444, d_tracer, 6171 NULL, &tracing_saved_cmdlines_fops); 6172 6173 #ifdef CONFIG_DYNAMIC_FTRACE 6174 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 6175 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 6176 #endif 6177 6178 create_trace_instances(d_tracer); 6179 6180 create_trace_options_dir(&global_trace); 6181 6182 return 0; 6183 } 6184 6185 static int trace_panic_handler(struct notifier_block *this, 6186 unsigned long event, void *unused) 6187 { 6188 if (ftrace_dump_on_oops) 6189 ftrace_dump(ftrace_dump_on_oops); 6190 return NOTIFY_OK; 6191 } 6192 6193 static struct notifier_block trace_panic_notifier = { 6194 .notifier_call = trace_panic_handler, 6195 .next = NULL, 6196 .priority = 150 /* priority: INT_MAX >= x >= 0 */ 6197 }; 6198 6199 static int trace_die_handler(struct notifier_block *self, 6200 unsigned long val, 6201 void *data) 6202 { 6203 switch (val) { 6204 case DIE_OOPS: 6205 if (ftrace_dump_on_oops) 6206 ftrace_dump(ftrace_dump_on_oops); 6207 break; 6208 default: 6209 break; 6210 } 6211 return NOTIFY_OK; 6212 } 6213 6214 static struct notifier_block trace_die_notifier = { 6215 .notifier_call = trace_die_handler, 6216 .priority = 200 6217 }; 6218 6219 /* 6220 * printk is set to max of 1024, we really don't need it that big. 6221 * Nothing should be printing 1000 characters anyway. 6222 */ 6223 #define TRACE_MAX_PRINT 1000 6224 6225 /* 6226 * Define here KERN_TRACE so that we have one place to modify 6227 * it if we decide to change what log level the ftrace dump 6228 * should be at. 6229 */ 6230 #define KERN_TRACE KERN_EMERG 6231 6232 void 6233 trace_printk_seq(struct trace_seq *s) 6234 { 6235 /* Probably should print a warning here. */ 6236 if (s->len >= TRACE_MAX_PRINT) 6237 s->len = TRACE_MAX_PRINT; 6238 6239 /* should be zero ended, but we are paranoid. */ 6240 s->buffer[s->len] = 0; 6241 6242 printk(KERN_TRACE "%s", s->buffer); 6243 6244 trace_seq_init(s); 6245 } 6246 6247 void trace_init_global_iter(struct trace_iterator *iter) 6248 { 6249 iter->tr = &global_trace; 6250 iter->trace = iter->tr->current_trace; 6251 iter->cpu_file = RING_BUFFER_ALL_CPUS; 6252 iter->trace_buffer = &global_trace.trace_buffer; 6253 } 6254 6255 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) 6256 { 6257 /* use static because iter can be a bit big for the stack */ 6258 static struct trace_iterator iter; 6259 static atomic_t dump_running; 6260 unsigned int old_userobj; 6261 unsigned long flags; 6262 int cnt = 0, cpu; 6263 6264 /* Only allow one dump user at a time. 
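* ftrace_dump() can be entered concurrently from the panic and die notifiers above; the dump_running counter arbitrates.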
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read everything we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

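/*
 * Since ftrace_dump() is exported (GPL-only), a module can drain the
 * trace buffers from its own fatal-error path. A minimal sketch with a
 * hypothetical my_fatal_error(), under the assumption that losing
 * tracing afterwards is acceptable: ftrace_dump() calls tracing_off()
 * and does not turn tracing back on.
 */
#if 0
static void my_fatal_error(void)
{
	/*
	 * DUMP_ORIG dumps only the buffer of the CPU doing the dump;
	 * DUMP_ALL would walk the buffers of every CPU.
	 */
	ftrace_dump(DUMP_ORIG);
}
#endif
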
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init
	 * section that is freed after boot. This function runs as a
	 * late initcall; if the boot tracer was never registered by
	 * then, clear the pointer so that a later registration does
	 * not access the buffer that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
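
/*
 * Ordering sketch (hypothetical, for illustration only): a tracer
 * selected on the command line with ftrace=<name> is honored only if
 * its register_tracer() call runs before clear_boot_tracer() at
 * late_initcall time. A built-in tracer would therefore register from
 * an earlier initcall level, along these lines; all "my_*" names are
 * invented.
 */
#if 0
static int my_tracer_init(struct trace_array *tr)
{
	return 0;	/* nothing to set up for this illustration */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "my_tracer",
	.init	= my_tracer_init,
};

static __init int my_tracer_initcall(void)
{
	/* core_initcall runs well before late_initcall(clear_boot_tracer) */
	return register_tracer(&my_tracer);
}
core_initcall(my_tracer_initcall);
#endif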