/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}
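/*
 * Illustrative sketch (not part of this file): a tracer that does define
 * its own options would supply real tables instead of the dummies above,
 * using the TRACER_OPT() helper from trace.h. The option name and bit
 * below are made up:
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val  = 0,
 *		.opts = my_tracer_opts,
 *	};
 */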
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs.
 * Set it to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
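/*
 * For example, ns2usecs() rounds to the nearest microsecond rather than
 * truncating: ns2usecs(1499) == 1 but ns2usecs(1500) == 2, because 500
 * is added before the divide by 1000.
 */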
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;
/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
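/*
 * Illustrative use of the primitives above (a sketch, not a caller in
 * this chunk of the file): a reader that consumes events from one CPU
 * buffer brackets the consuming step, so per-cpu readers can run in
 * parallel while a whole-buffer reader (RING_BUFFER_ALL_CPUS) excludes
 * them all:
 *
 *	trace_access_lock(cpu);
 *	ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */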
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	  The address of the caller
 * @str:  The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	  The address of the caller
 * @str:  The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
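/*
 * Callers normally do not use __trace_puts()/__trace_bputs() directly;
 * the trace_puts() macro in linux/kernel.h picks one of them depending
 * on whether the string is a compile-time constant. A typical
 * (illustrative) use from debugging code is simply:
 *
 *	trace_puts("reached the slow path\n");
 */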
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
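/*
 * Illustrative snapshot use from kernel code (a sketch, not a caller in
 * this file): allocate the spare buffer once from a sleepable context,
 * then take a snapshot when something interesting happens while tracing
 * continues:
 *
 *	tracing_snapshot_alloc();
 *	...
 *	if (suspicious_condition)
 *		tracing_snapshot();
 *
 * User space can do the equivalent through
 * /sys/kernel/debug/tracing/snapshot, as the kerneldoc above notes.
 */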
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	{ trace_clock_jiffies,	"uptime",	1 },
	{ trace_clock,		"perf",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
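/*
 * A sketch of how these parser helpers are typically used by the ftrace
 * file write handlers (the handler arguments and process_token() below
 * are made-up stand-ins): allocate the parser once, pull one
 * whitespace-separated token per call with trace_get_user() (defined
 * below), then free it:
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *	trace_parser_put(&parser);
 */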
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}
/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
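/*
 * Illustrative registration (a sketch, not a tracer in this file): a
 * minimal plugin only needs a name plus init/reset callbacks and then
 * calls register_tracer() from an initcall. The names below are made up:
 *
 *	static struct tracer noop_like_tracer __read_mostly = {
 *		.name	= "noop_like",
 *		.init	= noop_like_init,
 *		.reset	= noop_like_reset,
 *	};
 *
 *	static __init int init_noop_like(void)
 *	{
 *		return register_tracer(&noop_like_tracer);
 *	}
 *	core_initcall(init_noop_like);
 */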
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
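/*
 * tracing_stop()/tracing_start() nest (stop_count is a counter), so a
 * kernel-internal user can bracket a noisy section like this (sketch;
 * the helper name is made up):
 *
 *	tracing_stop();
 *	do_something_we_do_not_want_traced();
 *	tracing_start();
 *
 * Unlike tracing_off(), this pair also calls ftrace_stop()/ftrace_start(),
 * and neither function is exported to modules.
 */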
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}
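/*
 * The reverse lookup above is what the output path uses when printing an
 * event. A minimal (illustrative) caller, where s and entry stand in for
 * the usual output-path arguments, looks like:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%s-%d", comm, entry->pid);
 */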
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip		= ip;
	entry->parent_ip	= parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
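/*
 * trace_dump_stack() above can serve as a debugging aid: a temporary
 * (illustrative) call site like the one below records the current kernel
 * call chain into the trace buffer instead of printing it to the console
 * the way dump_stack() would:
 *
 *	if (unexpected_state)
 *		trace_dump_stack(0);
 */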
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip	= ip;
	entry->fmt	= fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
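/*
 * These two paths back the trace_printk() family: trace_vbprintk()
 * handles the binary format normally used when the format string is a
 * compile-time constant, while __trace_array_vprintk() handles the
 * plain-text case. A typical (illustrative) caller simply does:
 *
 *	trace_printk("built %d descriptors in %lu ns\n", count, delta);
 */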
trace_array_vprintk(struct trace_array *tr, 2087 unsigned long ip, const char *fmt, va_list args) 2088 { 2089 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args); 2090 } 2091 2092 int trace_array_printk(struct trace_array *tr, 2093 unsigned long ip, const char *fmt, ...) 2094 { 2095 int ret; 2096 va_list ap; 2097 2098 if (!(trace_flags & TRACE_ITER_PRINTK)) 2099 return 0; 2100 2101 va_start(ap, fmt); 2102 ret = trace_array_vprintk(tr, ip, fmt, ap); 2103 va_end(ap); 2104 return ret; 2105 } 2106 2107 int trace_array_printk_buf(struct ring_buffer *buffer, 2108 unsigned long ip, const char *fmt, ...) 2109 { 2110 int ret; 2111 va_list ap; 2112 2113 if (!(trace_flags & TRACE_ITER_PRINTK)) 2114 return 0; 2115 2116 va_start(ap, fmt); 2117 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 2118 va_end(ap); 2119 return ret; 2120 } 2121 2122 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 2123 { 2124 return trace_array_vprintk(&global_trace, ip, fmt, args); 2125 } 2126 EXPORT_SYMBOL_GPL(trace_vprintk); 2127 2128 static void trace_iterator_increment(struct trace_iterator *iter) 2129 { 2130 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 2131 2132 iter->idx++; 2133 if (buf_iter) 2134 ring_buffer_read(buf_iter, NULL); 2135 } 2136 2137 static struct trace_entry * 2138 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 2139 unsigned long *lost_events) 2140 { 2141 struct ring_buffer_event *event; 2142 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 2143 2144 if (buf_iter) 2145 event = ring_buffer_iter_peek(buf_iter, ts); 2146 else 2147 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, 2148 lost_events); 2149 2150 if (event) { 2151 iter->ent_size = ring_buffer_event_length(event); 2152 return ring_buffer_event_data(event); 2153 } 2154 iter->ent_size = 0; 2155 return NULL; 2156 } 2157 2158 static struct trace_entry * 2159 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 2160 unsigned long *missing_events, u64 *ent_ts) 2161 { 2162 struct ring_buffer *buffer = iter->trace_buffer->buffer; 2163 struct trace_entry *ent, *next = NULL; 2164 unsigned long lost_events = 0, next_lost = 0; 2165 int cpu_file = iter->cpu_file; 2166 u64 next_ts = 0, ts; 2167 int next_cpu = -1; 2168 int next_size = 0; 2169 int cpu; 2170 2171 /* 2172 * If we are in a per_cpu trace file, don't bother by iterating over 2173 * all cpu and peek directly. 
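 * (That is, iter->cpu_file names one specific CPU rather than
 * RING_BUFFER_ALL_CPUS.)  Otherwise every per-cpu buffer is peeked
 * below and the entry with the smallest timestamp wins, which is how
 * the per-cpu ring buffers are merged back into a single
 * time-ordered stream.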
2174 */ 2175 if (cpu_file > RING_BUFFER_ALL_CPUS) { 2176 if (ring_buffer_empty_cpu(buffer, cpu_file)) 2177 return NULL; 2178 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 2179 if (ent_cpu) 2180 *ent_cpu = cpu_file; 2181 2182 return ent; 2183 } 2184 2185 for_each_tracing_cpu(cpu) { 2186 2187 if (ring_buffer_empty_cpu(buffer, cpu)) 2188 continue; 2189 2190 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 2191 2192 /* 2193 * Pick the entry with the smallest timestamp: 2194 */ 2195 if (ent && (!next || ts < next_ts)) { 2196 next = ent; 2197 next_cpu = cpu; 2198 next_ts = ts; 2199 next_lost = lost_events; 2200 next_size = iter->ent_size; 2201 } 2202 } 2203 2204 iter->ent_size = next_size; 2205 2206 if (ent_cpu) 2207 *ent_cpu = next_cpu; 2208 2209 if (ent_ts) 2210 *ent_ts = next_ts; 2211 2212 if (missing_events) 2213 *missing_events = next_lost; 2214 2215 return next; 2216 } 2217 2218 /* Find the next real entry, without updating the iterator itself */ 2219 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 2220 int *ent_cpu, u64 *ent_ts) 2221 { 2222 return __find_next_entry(iter, ent_cpu, NULL, ent_ts); 2223 } 2224 2225 /* Find the next real entry, and increment the iterator to the next entry */ 2226 void *trace_find_next_entry_inc(struct trace_iterator *iter) 2227 { 2228 iter->ent = __find_next_entry(iter, &iter->cpu, 2229 &iter->lost_events, &iter->ts); 2230 2231 if (iter->ent) 2232 trace_iterator_increment(iter); 2233 2234 return iter->ent ? iter : NULL; 2235 } 2236 2237 static void trace_consume(struct trace_iterator *iter) 2238 { 2239 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, 2240 &iter->lost_events); 2241 } 2242 2243 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 2244 { 2245 struct trace_iterator *iter = m->private; 2246 int i = (int)*pos; 2247 void *ent; 2248 2249 WARN_ON_ONCE(iter->leftover); 2250 2251 (*pos)++; 2252 2253 /* can't go backwards */ 2254 if (iter->idx > i) 2255 return NULL; 2256 2257 if (iter->idx < 0) 2258 ent = trace_find_next_entry_inc(iter); 2259 else 2260 ent = iter; 2261 2262 while (ent && iter->idx < i) 2263 ent = trace_find_next_entry_inc(iter); 2264 2265 iter->pos = *pos; 2266 2267 return ent; 2268 } 2269 2270 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 2271 { 2272 struct ring_buffer_event *event; 2273 struct ring_buffer_iter *buf_iter; 2274 unsigned long entries = 0; 2275 u64 ts; 2276 2277 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; 2278 2279 buf_iter = trace_buffer_iter(iter, cpu); 2280 if (!buf_iter) 2281 return; 2282 2283 ring_buffer_iter_reset(buf_iter); 2284 2285 /* 2286 * We could have the case with the max latency tracers 2287 * that a reset never took place on a cpu. This is evident 2288 * by the timestamp being before the start of the buffer. 2289 */ 2290 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { 2291 if (ts >= iter->trace_buffer->time_start) 2292 break; 2293 entries++; 2294 ring_buffer_read(buf_iter, NULL); 2295 } 2296 2297 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; 2298 } 2299 2300 /* 2301 * The current tracer is copied to avoid a global locking 2302 * all around. 2303 */ 2304 static void *s_start(struct seq_file *m, loff_t *pos) 2305 { 2306 struct trace_iterator *iter = m->private; 2307 struct trace_array *tr = iter->tr; 2308 int cpu_file = iter->cpu_file; 2309 void *p = NULL; 2310 loff_t l = 0; 2311 int cpu; 2312 2313 /* 2314 * copy the tracer to avoid using a global lock all around. 
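 * trace_types_lock is only taken for the short window needed to
 * refresh that copy when the current tracer has changed.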
2315 * iter->trace is a copy of current_trace, the pointer to the 2316 * name may be used instead of a strcmp(), as iter->trace->name 2317 * will point to the same string as current_trace->name. 2318 */ 2319 mutex_lock(&trace_types_lock); 2320 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) 2321 *iter->trace = *tr->current_trace; 2322 mutex_unlock(&trace_types_lock); 2323 2324 #ifdef CONFIG_TRACER_MAX_TRACE 2325 if (iter->snapshot && iter->trace->use_max_tr) 2326 return ERR_PTR(-EBUSY); 2327 #endif 2328 2329 if (!iter->snapshot) 2330 atomic_inc(&trace_record_cmdline_disabled); 2331 2332 if (*pos != iter->pos) { 2333 iter->ent = NULL; 2334 iter->cpu = 0; 2335 iter->idx = -1; 2336 2337 if (cpu_file == RING_BUFFER_ALL_CPUS) { 2338 for_each_tracing_cpu(cpu) 2339 tracing_iter_reset(iter, cpu); 2340 } else 2341 tracing_iter_reset(iter, cpu_file); 2342 2343 iter->leftover = 0; 2344 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 2345 ; 2346 2347 } else { 2348 /* 2349 * If we overflowed the seq_file before, then we want 2350 * to just reuse the trace_seq buffer again. 2351 */ 2352 if (iter->leftover) 2353 p = iter; 2354 else { 2355 l = *pos - 1; 2356 p = s_next(m, p, &l); 2357 } 2358 } 2359 2360 trace_event_read_lock(); 2361 trace_access_lock(cpu_file); 2362 return p; 2363 } 2364 2365 static void s_stop(struct seq_file *m, void *p) 2366 { 2367 struct trace_iterator *iter = m->private; 2368 2369 #ifdef CONFIG_TRACER_MAX_TRACE 2370 if (iter->snapshot && iter->trace->use_max_tr) 2371 return; 2372 #endif 2373 2374 if (!iter->snapshot) 2375 atomic_dec(&trace_record_cmdline_disabled); 2376 2377 trace_access_unlock(iter->cpu_file); 2378 trace_event_read_unlock(); 2379 } 2380 2381 static void 2382 get_total_entries(struct trace_buffer *buf, 2383 unsigned long *total, unsigned long *entries) 2384 { 2385 unsigned long count; 2386 int cpu; 2387 2388 *total = 0; 2389 *entries = 0; 2390 2391 for_each_tracing_cpu(cpu) { 2392 count = ring_buffer_entries_cpu(buf->buffer, cpu); 2393 /* 2394 * If this buffer has skipped entries, then we hold all 2395 * entries for the trace and we need to ignore the 2396 * ones before the time stamp. 
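 * In that case nothing was overwritten, so no overrun count is added
 * in and "total" matches "entries" for this CPU; the normal case
 * below also folds in the per-cpu overrun count.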
2397 */ 2398 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 2399 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 2400 /* total is the same as the entries */ 2401 *total += count; 2402 } else 2403 *total += count + 2404 ring_buffer_overrun_cpu(buf->buffer, cpu); 2405 *entries += count; 2406 } 2407 } 2408 2409 static void print_lat_help_header(struct seq_file *m) 2410 { 2411 seq_puts(m, "# _------=> CPU# \n"); 2412 seq_puts(m, "# / _-----=> irqs-off \n"); 2413 seq_puts(m, "# | / _----=> need-resched \n"); 2414 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 2415 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 2416 seq_puts(m, "# |||| / delay \n"); 2417 seq_puts(m, "# cmd pid ||||| time | caller \n"); 2418 seq_puts(m, "# \\ / ||||| \\ | / \n"); 2419 } 2420 2421 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) 2422 { 2423 unsigned long total; 2424 unsigned long entries; 2425 2426 get_total_entries(buf, &total, &entries); 2427 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 2428 entries, total, num_online_cpus()); 2429 seq_puts(m, "#\n"); 2430 } 2431 2432 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) 2433 { 2434 print_event_info(buf, m); 2435 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 2436 seq_puts(m, "# | | | | |\n"); 2437 } 2438 2439 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) 2440 { 2441 print_event_info(buf, m); 2442 seq_puts(m, "# _-----=> irqs-off\n"); 2443 seq_puts(m, "# / _----=> need-resched\n"); 2444 seq_puts(m, "# | / _---=> hardirq/softirq\n"); 2445 seq_puts(m, "# || / _--=> preempt-depth\n"); 2446 seq_puts(m, "# ||| / delay\n"); 2447 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); 2448 seq_puts(m, "# | | | |||| | |\n"); 2449 } 2450 2451 void 2452 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 2453 { 2454 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2455 struct trace_buffer *buf = iter->trace_buffer; 2456 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 2457 struct tracer *type = iter->trace; 2458 unsigned long entries; 2459 unsigned long total; 2460 const char *name = "preemption"; 2461 2462 name = type->name; 2463 2464 get_total_entries(buf, &total, &entries); 2465 2466 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 2467 name, UTS_RELEASE); 2468 seq_puts(m, "# -----------------------------------" 2469 "---------------------------------\n"); 2470 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 2471 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 2472 nsecs_to_usecs(data->saved_latency), 2473 entries, 2474 total, 2475 buf->cpu, 2476 #if defined(CONFIG_PREEMPT_NONE) 2477 "server", 2478 #elif defined(CONFIG_PREEMPT_VOLUNTARY) 2479 "desktop", 2480 #elif defined(CONFIG_PREEMPT) 2481 "preempt", 2482 #else 2483 "unknown", 2484 #endif 2485 /* These are reserved for later use */ 2486 0, 0, 0, 0); 2487 #ifdef CONFIG_SMP 2488 seq_printf(m, " #P:%d)\n", num_online_cpus()); 2489 #else 2490 seq_puts(m, ")\n"); 2491 #endif 2492 seq_puts(m, "# -----------------\n"); 2493 seq_printf(m, "# | task: %.16s-%d " 2494 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 2495 data->comm, data->pid, 2496 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 2497 data->policy, data->rt_priority); 2498 seq_puts(m, "# -----------------\n"); 2499 2500 if (data->critical_start) { 2501 seq_puts(m, "# => started at: "); 2502 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 2503 trace_print_seq(m, 
&iter->seq); 2504 seq_puts(m, "\n# => ended at: "); 2505 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 2506 trace_print_seq(m, &iter->seq); 2507 seq_puts(m, "\n#\n"); 2508 } 2509 2510 seq_puts(m, "#\n"); 2511 } 2512 2513 static void test_cpu_buff_start(struct trace_iterator *iter) 2514 { 2515 struct trace_seq *s = &iter->seq; 2516 2517 if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2518 return; 2519 2520 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2521 return; 2522 2523 if (cpumask_test_cpu(iter->cpu, iter->started)) 2524 return; 2525 2526 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) 2527 return; 2528 2529 cpumask_set_cpu(iter->cpu, iter->started); 2530 2531 /* Don't print started cpu buffer for the first entry of the trace */ 2532 if (iter->idx > 1) 2533 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 2534 iter->cpu); 2535 } 2536 2537 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2538 { 2539 struct trace_seq *s = &iter->seq; 2540 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2541 struct trace_entry *entry; 2542 struct trace_event *event; 2543 2544 entry = iter->ent; 2545 2546 test_cpu_buff_start(iter); 2547 2548 event = ftrace_find_event(entry->type); 2549 2550 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2551 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2552 if (!trace_print_lat_context(iter)) 2553 goto partial; 2554 } else { 2555 if (!trace_print_context(iter)) 2556 goto partial; 2557 } 2558 } 2559 2560 if (event) 2561 return event->funcs->trace(iter, sym_flags, event); 2562 2563 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 2564 goto partial; 2565 2566 return TRACE_TYPE_HANDLED; 2567 partial: 2568 return TRACE_TYPE_PARTIAL_LINE; 2569 } 2570 2571 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2572 { 2573 struct trace_seq *s = &iter->seq; 2574 struct trace_entry *entry; 2575 struct trace_event *event; 2576 2577 entry = iter->ent; 2578 2579 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2580 if (!trace_seq_printf(s, "%d %d %llu ", 2581 entry->pid, iter->cpu, iter->ts)) 2582 goto partial; 2583 } 2584 2585 event = ftrace_find_event(entry->type); 2586 if (event) 2587 return event->funcs->raw(iter, 0, event); 2588 2589 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 2590 goto partial; 2591 2592 return TRACE_TYPE_HANDLED; 2593 partial: 2594 return TRACE_TYPE_PARTIAL_LINE; 2595 } 2596 2597 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2598 { 2599 struct trace_seq *s = &iter->seq; 2600 unsigned char newline = '\n'; 2601 struct trace_entry *entry; 2602 struct trace_event *event; 2603 2604 entry = iter->ent; 2605 2606 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2607 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 2608 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 2609 SEQ_PUT_HEX_FIELD_RET(s, iter->ts); 2610 } 2611 2612 event = ftrace_find_event(entry->type); 2613 if (event) { 2614 enum print_line_t ret = event->funcs->hex(iter, 0, event); 2615 if (ret != TRACE_TYPE_HANDLED) 2616 return ret; 2617 } 2618 2619 SEQ_PUT_FIELD_RET(s, newline); 2620 2621 return TRACE_TYPE_HANDLED; 2622 } 2623 2624 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2625 { 2626 struct trace_seq *s = &iter->seq; 2627 struct trace_entry *entry; 2628 struct trace_event *event; 2629 2630 entry = iter->ent; 2631 2632 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2633 SEQ_PUT_FIELD_RET(s, entry->pid); 2634 SEQ_PUT_FIELD_RET(s, iter->cpu); 2635 SEQ_PUT_FIELD_RET(s, iter->ts); 2636 } 2637 
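	/*
	 * Hand the entry to its registered binary formatter, if one
	 * exists; unknown event types are silently treated as handled.
	 */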
2638 event = ftrace_find_event(entry->type); 2639 return event ? event->funcs->binary(iter, 0, event) : 2640 TRACE_TYPE_HANDLED; 2641 } 2642 2643 int trace_empty(struct trace_iterator *iter) 2644 { 2645 struct ring_buffer_iter *buf_iter; 2646 int cpu; 2647 2648 /* If we are looking at one CPU buffer, only check that one */ 2649 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 2650 cpu = iter->cpu_file; 2651 buf_iter = trace_buffer_iter(iter, cpu); 2652 if (buf_iter) { 2653 if (!ring_buffer_iter_empty(buf_iter)) 2654 return 0; 2655 } else { 2656 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 2657 return 0; 2658 } 2659 return 1; 2660 } 2661 2662 for_each_tracing_cpu(cpu) { 2663 buf_iter = trace_buffer_iter(iter, cpu); 2664 if (buf_iter) { 2665 if (!ring_buffer_iter_empty(buf_iter)) 2666 return 0; 2667 } else { 2668 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu)) 2669 return 0; 2670 } 2671 } 2672 2673 return 1; 2674 } 2675 2676 /* Called with trace_event_read_lock() held. */ 2677 enum print_line_t print_trace_line(struct trace_iterator *iter) 2678 { 2679 enum print_line_t ret; 2680 2681 if (iter->lost_events && 2682 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 2683 iter->cpu, iter->lost_events)) 2684 return TRACE_TYPE_PARTIAL_LINE; 2685 2686 if (iter->trace && iter->trace->print_line) { 2687 ret = iter->trace->print_line(iter); 2688 if (ret != TRACE_TYPE_UNHANDLED) 2689 return ret; 2690 } 2691 2692 if (iter->ent->type == TRACE_BPUTS && 2693 trace_flags & TRACE_ITER_PRINTK && 2694 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2695 return trace_print_bputs_msg_only(iter); 2696 2697 if (iter->ent->type == TRACE_BPRINT && 2698 trace_flags & TRACE_ITER_PRINTK && 2699 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2700 return trace_print_bprintk_msg_only(iter); 2701 2702 if (iter->ent->type == TRACE_PRINT && 2703 trace_flags & TRACE_ITER_PRINTK && 2704 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2705 return trace_print_printk_msg_only(iter); 2706 2707 if (trace_flags & TRACE_ITER_BIN) 2708 return print_bin_fmt(iter); 2709 2710 if (trace_flags & TRACE_ITER_HEX) 2711 return print_hex_fmt(iter); 2712 2713 if (trace_flags & TRACE_ITER_RAW) 2714 return print_raw_fmt(iter); 2715 2716 return print_trace_fmt(iter); 2717 } 2718 2719 void trace_latency_header(struct seq_file *m) 2720 { 2721 struct trace_iterator *iter = m->private; 2722 2723 /* print nothing if the buffers are empty */ 2724 if (trace_empty(iter)) 2725 return; 2726 2727 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 2728 print_trace_header(m, iter); 2729 2730 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2731 print_lat_help_header(m); 2732 } 2733 2734 void trace_default_header(struct seq_file *m) 2735 { 2736 struct trace_iterator *iter = m->private; 2737 2738 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 2739 return; 2740 2741 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2742 /* print nothing if the buffers are empty */ 2743 if (trace_empty(iter)) 2744 return; 2745 print_trace_header(m, iter); 2746 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2747 print_lat_help_header(m); 2748 } else { 2749 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 2750 if (trace_flags & TRACE_ITER_IRQ_INFO) 2751 print_func_help_header_irq(iter->trace_buffer, m); 2752 else 2753 print_func_help_header(iter->trace_buffer, m); 2754 } 2755 } 2756 } 2757 2758 static void test_ftrace_alive(struct seq_file *m) 2759 { 2760 if (!ftrace_is_dead()) 2761 return; 2762 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 2763 seq_printf(m, "# MAY BE MISSING FUNCTION 
EVENTS\n"); 2764 } 2765 2766 #ifdef CONFIG_TRACER_MAX_TRACE 2767 static void show_snapshot_main_help(struct seq_file *m) 2768 { 2769 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); 2770 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); 2771 seq_printf(m, "# Takes a snapshot of the main buffer.\n"); 2772 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"); 2773 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); 2774 seq_printf(m, "# is not a '0' or '1')\n"); 2775 } 2776 2777 static void show_snapshot_percpu_help(struct seq_file *m) 2778 { 2779 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 2780 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 2781 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); 2782 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n"); 2783 #else 2784 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n"); 2785 seq_printf(m, "# Must use main snapshot file to allocate.\n"); 2786 #endif 2787 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"); 2788 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); 2789 seq_printf(m, "# is not a '0' or '1')\n"); 2790 } 2791 2792 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 2793 { 2794 if (iter->tr->allocated_snapshot) 2795 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); 2796 else 2797 seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); 2798 2799 seq_printf(m, "# Snapshot commands:\n"); 2800 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 2801 show_snapshot_main_help(m); 2802 else 2803 show_snapshot_percpu_help(m); 2804 } 2805 #else 2806 /* Should never be called */ 2807 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 2808 #endif 2809 2810 static int s_show(struct seq_file *m, void *v) 2811 { 2812 struct trace_iterator *iter = v; 2813 int ret; 2814 2815 if (iter->ent == NULL) { 2816 if (iter->tr) { 2817 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2818 seq_puts(m, "#\n"); 2819 test_ftrace_alive(m); 2820 } 2821 if (iter->snapshot && trace_empty(iter)) 2822 print_snapshot_help(m, iter); 2823 else if (iter->trace && iter->trace->print_header) 2824 iter->trace->print_header(m); 2825 else 2826 trace_default_header(m); 2827 2828 } else if (iter->leftover) { 2829 /* 2830 * If we filled the seq_file buffer earlier, we 2831 * want to just show it now. 2832 */ 2833 ret = trace_print_seq(m, &iter->seq); 2834 2835 /* ret should this time be zero, but you never know */ 2836 iter->leftover = ret; 2837 2838 } else { 2839 print_trace_line(iter); 2840 ret = trace_print_seq(m, &iter->seq); 2841 /* 2842 * If we overflow the seq_file buffer, then it will 2843 * ask us for this data again at start up. 2844 * Use that instead. 2845 * ret is 0 if seq_file write succeeded. 2846 * -1 otherwise. 2847 */ 2848 iter->leftover = ret; 2849 } 2850 2851 return 0; 2852 } 2853 2854 /* 2855 * Should be used after trace_array_get(), trace_types_lock 2856 * ensures that i_cdev was already initialized. 
2857 */ 2858 static inline int tracing_get_cpu(struct inode *inode) 2859 { 2860 if (inode->i_cdev) /* See trace_create_cpu_file() */ 2861 return (long)inode->i_cdev - 1; 2862 return RING_BUFFER_ALL_CPUS; 2863 } 2864 2865 static const struct seq_operations tracer_seq_ops = { 2866 .start = s_start, 2867 .next = s_next, 2868 .stop = s_stop, 2869 .show = s_show, 2870 }; 2871 2872 static struct trace_iterator * 2873 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2874 { 2875 struct trace_array *tr = inode->i_private; 2876 struct trace_iterator *iter; 2877 int cpu; 2878 2879 if (tracing_disabled) 2880 return ERR_PTR(-ENODEV); 2881 2882 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 2883 if (!iter) 2884 return ERR_PTR(-ENOMEM); 2885 2886 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), 2887 GFP_KERNEL); 2888 if (!iter->buffer_iter) 2889 goto release; 2890 2891 /* 2892 * We make a copy of the current tracer to avoid concurrent 2893 * changes on it while we are reading. 2894 */ 2895 mutex_lock(&trace_types_lock); 2896 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2897 if (!iter->trace) 2898 goto fail; 2899 2900 *iter->trace = *tr->current_trace; 2901 2902 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2903 goto fail; 2904 2905 iter->tr = tr; 2906 2907 #ifdef CONFIG_TRACER_MAX_TRACE 2908 /* Currently only the top directory has a snapshot */ 2909 if (tr->current_trace->print_max || snapshot) 2910 iter->trace_buffer = &tr->max_buffer; 2911 else 2912 #endif 2913 iter->trace_buffer = &tr->trace_buffer; 2914 iter->snapshot = snapshot; 2915 iter->pos = -1; 2916 iter->cpu_file = tracing_get_cpu(inode); 2917 mutex_init(&iter->mutex); 2918 2919 /* Notify the tracer early; before we stop tracing. */ 2920 if (iter->trace && iter->trace->open) 2921 iter->trace->open(iter); 2922 2923 /* Annotate start of buffers if we had overruns */ 2924 if (ring_buffer_overruns(iter->trace_buffer->buffer)) 2925 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2926 2927 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 2928 if (trace_clocks[tr->clock_id].in_ns) 2929 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2930 2931 /* stop the trace while dumping if we are not opening "snapshot" */ 2932 if (!iter->snapshot) 2933 tracing_stop_tr(tr); 2934 2935 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 2936 for_each_tracing_cpu(cpu) { 2937 iter->buffer_iter[cpu] = 2938 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2939 } 2940 ring_buffer_read_prepare_sync(); 2941 for_each_tracing_cpu(cpu) { 2942 ring_buffer_read_start(iter->buffer_iter[cpu]); 2943 tracing_iter_reset(iter, cpu); 2944 } 2945 } else { 2946 cpu = iter->cpu_file; 2947 iter->buffer_iter[cpu] = 2948 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2949 ring_buffer_read_prepare_sync(); 2950 ring_buffer_read_start(iter->buffer_iter[cpu]); 2951 tracing_iter_reset(iter, cpu); 2952 } 2953 2954 mutex_unlock(&trace_types_lock); 2955 2956 return iter; 2957 2958 fail: 2959 mutex_unlock(&trace_types_lock); 2960 kfree(iter->trace); 2961 kfree(iter->buffer_iter); 2962 release: 2963 seq_release_private(inode, file); 2964 return ERR_PTR(-ENOMEM); 2965 } 2966 2967 int tracing_open_generic(struct inode *inode, struct file *filp) 2968 { 2969 if (tracing_disabled) 2970 return -ENODEV; 2971 2972 filp->private_data = inode->i_private; 2973 return 0; 2974 } 2975 2976 bool tracing_is_disabled(void) 2977 { 2978 return (tracing_disabled) ? 
true: false; 2979 } 2980 2981 /* 2982 * Open and update trace_array ref count. 2983 * Must have the current trace_array passed to it. 2984 */ 2985 static int tracing_open_generic_tr(struct inode *inode, struct file *filp) 2986 { 2987 struct trace_array *tr = inode->i_private; 2988 2989 if (tracing_disabled) 2990 return -ENODEV; 2991 2992 if (trace_array_get(tr) < 0) 2993 return -ENODEV; 2994 2995 filp->private_data = inode->i_private; 2996 2997 return 0; 2998 } 2999 3000 static int tracing_release(struct inode *inode, struct file *file) 3001 { 3002 struct trace_array *tr = inode->i_private; 3003 struct seq_file *m = file->private_data; 3004 struct trace_iterator *iter; 3005 int cpu; 3006 3007 if (!(file->f_mode & FMODE_READ)) { 3008 trace_array_put(tr); 3009 return 0; 3010 } 3011 3012 /* Writes do not use seq_file */ 3013 iter = m->private; 3014 mutex_lock(&trace_types_lock); 3015 3016 for_each_tracing_cpu(cpu) { 3017 if (iter->buffer_iter[cpu]) 3018 ring_buffer_read_finish(iter->buffer_iter[cpu]); 3019 } 3020 3021 if (iter->trace && iter->trace->close) 3022 iter->trace->close(iter); 3023 3024 if (!iter->snapshot) 3025 /* reenable tracing if it was previously enabled */ 3026 tracing_start_tr(tr); 3027 3028 __trace_array_put(tr); 3029 3030 mutex_unlock(&trace_types_lock); 3031 3032 mutex_destroy(&iter->mutex); 3033 free_cpumask_var(iter->started); 3034 kfree(iter->trace); 3035 kfree(iter->buffer_iter); 3036 seq_release_private(inode, file); 3037 3038 return 0; 3039 } 3040 3041 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 3042 { 3043 struct trace_array *tr = inode->i_private; 3044 3045 trace_array_put(tr); 3046 return 0; 3047 } 3048 3049 static int tracing_single_release_tr(struct inode *inode, struct file *file) 3050 { 3051 struct trace_array *tr = inode->i_private; 3052 3053 trace_array_put(tr); 3054 3055 return single_release(inode, file); 3056 } 3057 3058 static int tracing_open(struct inode *inode, struct file *file) 3059 { 3060 struct trace_array *tr = inode->i_private; 3061 struct trace_iterator *iter; 3062 int ret = 0; 3063 3064 if (trace_array_get(tr) < 0) 3065 return -ENODEV; 3066 3067 /* If this file was open for write, then erase contents */ 3068 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 3069 int cpu = tracing_get_cpu(inode); 3070 3071 if (cpu == RING_BUFFER_ALL_CPUS) 3072 tracing_reset_online_cpus(&tr->trace_buffer); 3073 else 3074 tracing_reset(&tr->trace_buffer, cpu); 3075 } 3076 3077 if (file->f_mode & FMODE_READ) { 3078 iter = __tracing_open(inode, file, false); 3079 if (IS_ERR(iter)) 3080 ret = PTR_ERR(iter); 3081 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 3082 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3083 } 3084 3085 if (ret < 0) 3086 trace_array_put(tr); 3087 3088 return ret; 3089 } 3090 3091 static void * 3092 t_next(struct seq_file *m, void *v, loff_t *pos) 3093 { 3094 struct tracer *t = v; 3095 3096 (*pos)++; 3097 3098 if (t) 3099 t = t->next; 3100 3101 return t; 3102 } 3103 3104 static void *t_start(struct seq_file *m, loff_t *pos) 3105 { 3106 struct tracer *t; 3107 loff_t l = 0; 3108 3109 mutex_lock(&trace_types_lock); 3110 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 3111 ; 3112 3113 return t; 3114 } 3115 3116 static void t_stop(struct seq_file *m, void *p) 3117 { 3118 mutex_unlock(&trace_types_lock); 3119 } 3120 3121 static int t_show(struct seq_file *m, void *v) 3122 { 3123 struct tracer *t = v; 3124 3125 if (!t) 3126 return 0; 3127 3128 seq_printf(m, "%s", t->name); 3129 if (t->next) 3130 
seq_putc(m, ' '); 3131 else 3132 seq_putc(m, '\n'); 3133 3134 return 0; 3135 } 3136 3137 static const struct seq_operations show_traces_seq_ops = { 3138 .start = t_start, 3139 .next = t_next, 3140 .stop = t_stop, 3141 .show = t_show, 3142 }; 3143 3144 static int show_traces_open(struct inode *inode, struct file *file) 3145 { 3146 if (tracing_disabled) 3147 return -ENODEV; 3148 3149 return seq_open(file, &show_traces_seq_ops); 3150 } 3151 3152 static ssize_t 3153 tracing_write_stub(struct file *filp, const char __user *ubuf, 3154 size_t count, loff_t *ppos) 3155 { 3156 return count; 3157 } 3158 3159 static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 3160 { 3161 if (file->f_mode & FMODE_READ) 3162 return seq_lseek(file, offset, origin); 3163 else 3164 return 0; 3165 } 3166 3167 static const struct file_operations tracing_fops = { 3168 .open = tracing_open, 3169 .read = seq_read, 3170 .write = tracing_write_stub, 3171 .llseek = tracing_seek, 3172 .release = tracing_release, 3173 }; 3174 3175 static const struct file_operations show_traces_fops = { 3176 .open = show_traces_open, 3177 .read = seq_read, 3178 .release = seq_release, 3179 .llseek = seq_lseek, 3180 }; 3181 3182 /* 3183 * The tracer itself will not take this lock, but still we want 3184 * to provide a consistent cpumask to user-space: 3185 */ 3186 static DEFINE_MUTEX(tracing_cpumask_update_lock); 3187 3188 /* 3189 * Temporary storage for the character representation of the 3190 * CPU bitmask (and one more byte for the newline): 3191 */ 3192 static char mask_str[NR_CPUS + 1]; 3193 3194 static ssize_t 3195 tracing_cpumask_read(struct file *filp, char __user *ubuf, 3196 size_t count, loff_t *ppos) 3197 { 3198 struct trace_array *tr = file_inode(filp)->i_private; 3199 int len; 3200 3201 mutex_lock(&tracing_cpumask_update_lock); 3202 3203 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask); 3204 if (count - len < 2) { 3205 count = -EINVAL; 3206 goto out_err; 3207 } 3208 len += sprintf(mask_str + len, "\n"); 3209 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 3210 3211 out_err: 3212 mutex_unlock(&tracing_cpumask_update_lock); 3213 3214 return count; 3215 } 3216 3217 static ssize_t 3218 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 3219 size_t count, loff_t *ppos) 3220 { 3221 struct trace_array *tr = file_inode(filp)->i_private; 3222 cpumask_var_t tracing_cpumask_new; 3223 int err, cpu; 3224 3225 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 3226 return -ENOMEM; 3227 3228 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 3229 if (err) 3230 goto err_unlock; 3231 3232 mutex_lock(&tracing_cpumask_update_lock); 3233 3234 local_irq_disable(); 3235 arch_spin_lock(&ftrace_max_lock); 3236 for_each_tracing_cpu(cpu) { 3237 /* 3238 * Increase/decrease the disabled counter if we are 3239 * about to flip a bit in the cpumask: 3240 */ 3241 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3242 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3243 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3244 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); 3245 } 3246 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3247 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3248 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3249 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 3250 } 3251 } 3252 arch_spin_unlock(&ftrace_max_lock); 3253 local_irq_enable(); 3254 3255 cpumask_copy(tr->tracing_cpumask, 
tracing_cpumask_new); 3256 3257 mutex_unlock(&tracing_cpumask_update_lock); 3258 free_cpumask_var(tracing_cpumask_new); 3259 3260 return count; 3261 3262 err_unlock: 3263 free_cpumask_var(tracing_cpumask_new); 3264 3265 return err; 3266 } 3267 3268 static const struct file_operations tracing_cpumask_fops = { 3269 .open = tracing_open_generic_tr, 3270 .read = tracing_cpumask_read, 3271 .write = tracing_cpumask_write, 3272 .release = tracing_release_generic_tr, 3273 .llseek = generic_file_llseek, 3274 }; 3275 3276 static int tracing_trace_options_show(struct seq_file *m, void *v) 3277 { 3278 struct tracer_opt *trace_opts; 3279 struct trace_array *tr = m->private; 3280 u32 tracer_flags; 3281 int i; 3282 3283 mutex_lock(&trace_types_lock); 3284 tracer_flags = tr->current_trace->flags->val; 3285 trace_opts = tr->current_trace->flags->opts; 3286 3287 for (i = 0; trace_options[i]; i++) { 3288 if (trace_flags & (1 << i)) 3289 seq_printf(m, "%s\n", trace_options[i]); 3290 else 3291 seq_printf(m, "no%s\n", trace_options[i]); 3292 } 3293 3294 for (i = 0; trace_opts[i].name; i++) { 3295 if (tracer_flags & trace_opts[i].bit) 3296 seq_printf(m, "%s\n", trace_opts[i].name); 3297 else 3298 seq_printf(m, "no%s\n", trace_opts[i].name); 3299 } 3300 mutex_unlock(&trace_types_lock); 3301 3302 return 0; 3303 } 3304 3305 static int __set_tracer_option(struct tracer *trace, 3306 struct tracer_flags *tracer_flags, 3307 struct tracer_opt *opts, int neg) 3308 { 3309 int ret; 3310 3311 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 3312 if (ret) 3313 return ret; 3314 3315 if (neg) 3316 tracer_flags->val &= ~opts->bit; 3317 else 3318 tracer_flags->val |= opts->bit; 3319 return 0; 3320 } 3321 3322 /* Try to assign a tracer specific option */ 3323 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 3324 { 3325 struct tracer_flags *tracer_flags = trace->flags; 3326 struct tracer_opt *opts = NULL; 3327 int i; 3328 3329 for (i = 0; tracer_flags->opts[i].name; i++) { 3330 opts = &tracer_flags->opts[i]; 3331 3332 if (strcmp(cmp, opts->name) == 0) 3333 return __set_tracer_option(trace, trace->flags, 3334 opts, neg); 3335 } 3336 3337 return -EINVAL; 3338 } 3339 3340 /* Some tracers require overwrite to stay enabled */ 3341 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 3342 { 3343 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 3344 return -1; 3345 3346 return 0; 3347 } 3348 3349 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 3350 { 3351 /* do nothing if flag is already set */ 3352 if (!!(trace_flags & mask) == !!enabled) 3353 return 0; 3354 3355 /* Give the tracer a chance to approve the change */ 3356 if (tr->current_trace->flag_changed) 3357 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) 3358 return -EINVAL; 3359 3360 if (enabled) 3361 trace_flags |= mask; 3362 else 3363 trace_flags &= ~mask; 3364 3365 if (mask == TRACE_ITER_RECORD_CMD) 3366 trace_event_enable_cmd_record(enabled); 3367 3368 if (mask == TRACE_ITER_OVERWRITE) { 3369 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); 3370 #ifdef CONFIG_TRACER_MAX_TRACE 3371 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 3372 #endif 3373 } 3374 3375 if (mask == TRACE_ITER_PRINTK) 3376 trace_printk_start_stop_comm(enabled); 3377 3378 return 0; 3379 } 3380 3381 static int trace_set_options(struct trace_array *tr, char *option) 3382 { 3383 char *cmp; 3384 int neg = 0; 3385 int ret = -ENODEV; 3386 int i; 3387 3388 cmp = 
strstrip(option); 3389 3390 if (strncmp(cmp, "no", 2) == 0) { 3391 neg = 1; 3392 cmp += 2; 3393 } 3394 3395 mutex_lock(&trace_types_lock); 3396 3397 for (i = 0; trace_options[i]; i++) { 3398 if (strcmp(cmp, trace_options[i]) == 0) { 3399 ret = set_tracer_flag(tr, 1 << i, !neg); 3400 break; 3401 } 3402 } 3403 3404 /* If no option could be set, test the specific tracer options */ 3405 if (!trace_options[i]) 3406 ret = set_tracer_option(tr->current_trace, cmp, neg); 3407 3408 mutex_unlock(&trace_types_lock); 3409 3410 return ret; 3411 } 3412 3413 static ssize_t 3414 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 3415 size_t cnt, loff_t *ppos) 3416 { 3417 struct seq_file *m = filp->private_data; 3418 struct trace_array *tr = m->private; 3419 char buf[64]; 3420 int ret; 3421 3422 if (cnt >= sizeof(buf)) 3423 return -EINVAL; 3424 3425 if (copy_from_user(&buf, ubuf, cnt)) 3426 return -EFAULT; 3427 3428 buf[cnt] = 0; 3429 3430 ret = trace_set_options(tr, buf); 3431 if (ret < 0) 3432 return ret; 3433 3434 *ppos += cnt; 3435 3436 return cnt; 3437 } 3438 3439 static int tracing_trace_options_open(struct inode *inode, struct file *file) 3440 { 3441 struct trace_array *tr = inode->i_private; 3442 int ret; 3443 3444 if (tracing_disabled) 3445 return -ENODEV; 3446 3447 if (trace_array_get(tr) < 0) 3448 return -ENODEV; 3449 3450 ret = single_open(file, tracing_trace_options_show, inode->i_private); 3451 if (ret < 0) 3452 trace_array_put(tr); 3453 3454 return ret; 3455 } 3456 3457 static const struct file_operations tracing_iter_fops = { 3458 .open = tracing_trace_options_open, 3459 .read = seq_read, 3460 .llseek = seq_lseek, 3461 .release = tracing_single_release_tr, 3462 .write = tracing_trace_options_write, 3463 }; 3464 3465 static const char readme_msg[] = 3466 "tracing mini-HOWTO:\n\n" 3467 "# echo 0 > tracing_on : quick way to disable tracing\n" 3468 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 3469 " Important files:\n" 3470 " trace\t\t\t- The static contents of the buffer\n" 3471 "\t\t\t To clear the buffer write into this file: echo > trace\n" 3472 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 3473 " current_tracer\t- function and latency tracers\n" 3474 " available_tracers\t- list of configured tracers for current_tracer\n" 3475 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 3476 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 3477 " trace_clock\t\t-change the clock used to order events\n" 3478 " local: Per cpu clock but may not be synced across CPUs\n" 3479 " global: Synced across CPUs but slows tracing down.\n" 3480 " counter: Not a clock, but just an increment\n" 3481 " uptime: Jiffy counter from time of boot\n" 3482 " perf: Same clock that perf events use\n" 3483 #ifdef CONFIG_X86_64 3484 " x86-tsc: TSC cycle counter\n" 3485 #endif 3486 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" 3487 " tracing_cpumask\t- Limit which CPUs to trace\n" 3488 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 3489 "\t\t\t Remove sub-buffer with rmdir\n" 3490 " trace_options\t\t- Set format or modify how tracing happens\n" 3491 "\t\t\t Disable an option by adding a suffix 'no' to the option name\n" 3492 #ifdef CONFIG_DYNAMIC_FTRACE 3493 "\n available_filter_functions - list of functions that can be filtered on\n" 3494 " set_ftrace_filter\t- echo function name in here to only trace these functions\n" 3495 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 
3496 " modules: Can select a group via module\n" 3497 " Format: :mod:<module-name>\n" 3498 " example: echo :mod:ext3 > set_ftrace_filter\n" 3499 " triggers: a command to perform when function is hit\n" 3500 " Format: <function>:<trigger>[:count]\n" 3501 " trigger: traceon, traceoff\n" 3502 " enable_event:<system>:<event>\n" 3503 " disable_event:<system>:<event>\n" 3504 #ifdef CONFIG_STACKTRACE 3505 " stacktrace\n" 3506 #endif 3507 #ifdef CONFIG_TRACER_SNAPSHOT 3508 " snapshot\n" 3509 #endif 3510 " example: echo do_fault:traceoff > set_ftrace_filter\n" 3511 " echo do_trap:traceoff:3 > set_ftrace_filter\n" 3512 " The first one will disable tracing every time do_fault is hit\n" 3513 " The second will disable tracing at most 3 times when do_trap is hit\n" 3514 " The first time do trap is hit and it disables tracing, the counter\n" 3515 " will decrement to 2. If tracing is already disabled, the counter\n" 3516 " will not decrement. It only decrements when the trigger did work\n" 3517 " To remove trigger without count:\n" 3518 " echo '!<function>:<trigger> > set_ftrace_filter\n" 3519 " To remove trigger with a count:\n" 3520 " echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 3521 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 3522 " accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 3523 " modules: Can select a group via module command :mod:\n" 3524 " Does not accept triggers\n" 3525 #endif /* CONFIG_DYNAMIC_FTRACE */ 3526 #ifdef CONFIG_FUNCTION_TRACER 3527 " set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n" 3528 #endif 3529 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3530 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 3531 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 3532 #endif 3533 #ifdef CONFIG_TRACER_SNAPSHOT 3534 "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" 3535 "\t\t\t Read the contents for more information\n" 3536 #endif 3537 #ifdef CONFIG_STACK_TRACER 3538 " stack_trace\t\t- Shows the max stack trace when active\n" 3539 " stack_max_size\t- Shows current max stack size that was traced\n" 3540 "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" 3541 #ifdef CONFIG_DYNAMIC_FTRACE 3542 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" 3543 #endif 3544 #endif /* CONFIG_STACK_TRACER */ 3545 ; 3546 3547 static ssize_t 3548 tracing_readme_read(struct file *filp, char __user *ubuf, 3549 size_t cnt, loff_t *ppos) 3550 { 3551 return simple_read_from_buffer(ubuf, cnt, ppos, 3552 readme_msg, strlen(readme_msg)); 3553 } 3554 3555 static const struct file_operations tracing_readme_fops = { 3556 .open = tracing_open_generic, 3557 .read = tracing_readme_read, 3558 .llseek = generic_file_llseek, 3559 }; 3560 3561 static ssize_t 3562 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 3563 size_t cnt, loff_t *ppos) 3564 { 3565 char *buf_comm; 3566 char *file_buf; 3567 char *buf; 3568 int len = 0; 3569 int pid; 3570 int i; 3571 3572 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 3573 if (!file_buf) 3574 return -ENOMEM; 3575 3576 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 3577 if (!buf_comm) { 3578 kfree(file_buf); 3579 return -ENOMEM; 3580 } 3581 3582 buf = file_buf; 3583 3584 for (i = 0; i < SAVED_CMDLINES; i++) { 3585 int r; 3586 3587 pid = map_cmdline_to_pid[i]; 3588 if (pid == -1 || pid == NO_CMDLINE_MAP) 3589 continue; 3590 3591 
trace_find_cmdline(pid, buf_comm); 3592 r = sprintf(buf, "%d %s\n", pid, buf_comm); 3593 buf += r; 3594 len += r; 3595 } 3596 3597 len = simple_read_from_buffer(ubuf, cnt, ppos, 3598 file_buf, len); 3599 3600 kfree(file_buf); 3601 kfree(buf_comm); 3602 3603 return len; 3604 } 3605 3606 static const struct file_operations tracing_saved_cmdlines_fops = { 3607 .open = tracing_open_generic, 3608 .read = tracing_saved_cmdlines_read, 3609 .llseek = generic_file_llseek, 3610 }; 3611 3612 static ssize_t 3613 tracing_set_trace_read(struct file *filp, char __user *ubuf, 3614 size_t cnt, loff_t *ppos) 3615 { 3616 struct trace_array *tr = filp->private_data; 3617 char buf[MAX_TRACER_SIZE+2]; 3618 int r; 3619 3620 mutex_lock(&trace_types_lock); 3621 r = sprintf(buf, "%s\n", tr->current_trace->name); 3622 mutex_unlock(&trace_types_lock); 3623 3624 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3625 } 3626 3627 int tracer_init(struct tracer *t, struct trace_array *tr) 3628 { 3629 tracing_reset_online_cpus(&tr->trace_buffer); 3630 return t->init(tr); 3631 } 3632 3633 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) 3634 { 3635 int cpu; 3636 3637 for_each_tracing_cpu(cpu) 3638 per_cpu_ptr(buf->data, cpu)->entries = val; 3639 } 3640 3641 #ifdef CONFIG_TRACER_MAX_TRACE 3642 /* resize @tr's buffer to the size of @size_tr's entries */ 3643 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 3644 struct trace_buffer *size_buf, int cpu_id) 3645 { 3646 int cpu, ret = 0; 3647 3648 if (cpu_id == RING_BUFFER_ALL_CPUS) { 3649 for_each_tracing_cpu(cpu) { 3650 ret = ring_buffer_resize(trace_buf->buffer, 3651 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 3652 if (ret < 0) 3653 break; 3654 per_cpu_ptr(trace_buf->data, cpu)->entries = 3655 per_cpu_ptr(size_buf->data, cpu)->entries; 3656 } 3657 } else { 3658 ret = ring_buffer_resize(trace_buf->buffer, 3659 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 3660 if (ret == 0) 3661 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 3662 per_cpu_ptr(size_buf->data, cpu_id)->entries; 3663 } 3664 3665 return ret; 3666 } 3667 #endif /* CONFIG_TRACER_MAX_TRACE */ 3668 3669 static int __tracing_resize_ring_buffer(struct trace_array *tr, 3670 unsigned long size, int cpu) 3671 { 3672 int ret; 3673 3674 /* 3675 * If kernel or user changes the size of the ring buffer 3676 * we use the size that was given, and we can forget about 3677 * expanding it later. 3678 */ 3679 ring_buffer_expanded = true; 3680 3681 /* May be called before buffers are initialized */ 3682 if (!tr->trace_buffer.buffer) 3683 return 0; 3684 3685 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); 3686 if (ret < 0) 3687 return ret; 3688 3689 #ifdef CONFIG_TRACER_MAX_TRACE 3690 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || 3691 !tr->current_trace->use_max_tr) 3692 goto out; 3693 3694 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 3695 if (ret < 0) { 3696 int r = resize_buffer_duplicate_size(&tr->trace_buffer, 3697 &tr->trace_buffer, cpu); 3698 if (r < 0) { 3699 /* 3700 * AARGH! We are left with different 3701 * size max buffer!!!! 3702 * The max buffer is our "snapshot" buffer. 3703 * When a tracer needs a snapshot (one of the 3704 * latency tracers), it swaps the max buffer 3705 * with the saved snap shot. We succeeded to 3706 * update the size of the main buffer, but failed to 3707 * update the size of the max buffer. But when we tried 3708 * to reset the main buffer to the original size, we 3709 * failed there too. 
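 * At that point the main and max buffers are stuck at different
 * sizes, and a later swap by update_max_tr() could not be trusted.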
This is very unlikely to 3710 * happen, but if it does, warn and kill all 3711 * tracing. 3712 */ 3713 WARN_ON(1); 3714 tracing_disabled = 1; 3715 } 3716 return ret; 3717 } 3718 3719 if (cpu == RING_BUFFER_ALL_CPUS) 3720 set_buffer_entries(&tr->max_buffer, size); 3721 else 3722 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; 3723 3724 out: 3725 #endif /* CONFIG_TRACER_MAX_TRACE */ 3726 3727 if (cpu == RING_BUFFER_ALL_CPUS) 3728 set_buffer_entries(&tr->trace_buffer, size); 3729 else 3730 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; 3731 3732 return ret; 3733 } 3734 3735 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 3736 unsigned long size, int cpu_id) 3737 { 3738 int ret = size; 3739 3740 mutex_lock(&trace_types_lock); 3741 3742 if (cpu_id != RING_BUFFER_ALL_CPUS) { 3743 /* make sure, this cpu is enabled in the mask */ 3744 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 3745 ret = -EINVAL; 3746 goto out; 3747 } 3748 } 3749 3750 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); 3751 if (ret < 0) 3752 ret = -ENOMEM; 3753 3754 out: 3755 mutex_unlock(&trace_types_lock); 3756 3757 return ret; 3758 } 3759 3760 3761 /** 3762 * tracing_update_buffers - used by tracing facility to expand ring buffers 3763 * 3764 * To save on memory when the tracing is never used on a system with it 3765 * configured in. The ring buffers are set to a minimum size. But once 3766 * a user starts to use the tracing facility, then they need to grow 3767 * to their default size. 3768 * 3769 * This function is to be called when a tracer is about to be used. 3770 */ 3771 int tracing_update_buffers(void) 3772 { 3773 int ret = 0; 3774 3775 mutex_lock(&trace_types_lock); 3776 if (!ring_buffer_expanded) 3777 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, 3778 RING_BUFFER_ALL_CPUS); 3779 mutex_unlock(&trace_types_lock); 3780 3781 return ret; 3782 } 3783 3784 struct trace_option_dentry; 3785 3786 static struct trace_option_dentry * 3787 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 3788 3789 static void 3790 destroy_trace_option_files(struct trace_option_dentry *topts); 3791 3792 static int tracing_set_tracer(const char *buf) 3793 { 3794 static struct trace_option_dentry *topts; 3795 struct trace_array *tr = &global_trace; 3796 struct tracer *t; 3797 #ifdef CONFIG_TRACER_MAX_TRACE 3798 bool had_max_tr; 3799 #endif 3800 int ret = 0; 3801 3802 mutex_lock(&trace_types_lock); 3803 3804 if (!ring_buffer_expanded) { 3805 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 3806 RING_BUFFER_ALL_CPUS); 3807 if (ret < 0) 3808 goto out; 3809 ret = 0; 3810 } 3811 3812 for (t = trace_types; t; t = t->next) { 3813 if (strcmp(t->name, buf) == 0) 3814 break; 3815 } 3816 if (!t) { 3817 ret = -EINVAL; 3818 goto out; 3819 } 3820 if (t == tr->current_trace) 3821 goto out; 3822 3823 trace_branch_disable(); 3824 3825 tr->current_trace->enabled = false; 3826 3827 if (tr->current_trace->reset) 3828 tr->current_trace->reset(tr); 3829 3830 /* Current trace needs to be nop_trace before synchronize_sched */ 3831 tr->current_trace = &nop_trace; 3832 3833 #ifdef CONFIG_TRACER_MAX_TRACE 3834 had_max_tr = tr->allocated_snapshot; 3835 3836 if (had_max_tr && !t->use_max_tr) { 3837 /* 3838 * We need to make sure that the update_max_tr sees that 3839 * current_trace changed to nop_trace to keep it from 3840 * swapping the buffers after we resize it. 3841 * The update_max_tr is called from interrupts disabled 3842 * so a synchronized_sched() is sufficient. 
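 * (synchronize_sched() only returns once every CPU has passed
 * through a point where preemption is possible, which cannot happen
 * while interrupts are disabled, so any update_max_tr() still in
 * flight has finished before the snapshot is freed below.)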
3843 */ 3844 synchronize_sched(); 3845 free_snapshot(tr); 3846 } 3847 #endif 3848 destroy_trace_option_files(topts); 3849 3850 topts = create_trace_option_files(tr, t); 3851 3852 #ifdef CONFIG_TRACER_MAX_TRACE 3853 if (t->use_max_tr && !had_max_tr) { 3854 ret = alloc_snapshot(tr); 3855 if (ret < 0) 3856 goto out; 3857 } 3858 #endif 3859 3860 if (t->init) { 3861 ret = tracer_init(t, tr); 3862 if (ret) 3863 goto out; 3864 } 3865 3866 tr->current_trace = t; 3867 tr->current_trace->enabled = true; 3868 trace_branch_enable(tr); 3869 out: 3870 mutex_unlock(&trace_types_lock); 3871 3872 return ret; 3873 } 3874 3875 static ssize_t 3876 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 3877 size_t cnt, loff_t *ppos) 3878 { 3879 char buf[MAX_TRACER_SIZE+1]; 3880 int i; 3881 size_t ret; 3882 int err; 3883 3884 ret = cnt; 3885 3886 if (cnt > MAX_TRACER_SIZE) 3887 cnt = MAX_TRACER_SIZE; 3888 3889 if (copy_from_user(&buf, ubuf, cnt)) 3890 return -EFAULT; 3891 3892 buf[cnt] = 0; 3893 3894 /* strip ending whitespace. */ 3895 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 3896 buf[i] = 0; 3897 3898 err = tracing_set_tracer(buf); 3899 if (err) 3900 return err; 3901 3902 *ppos += ret; 3903 3904 return ret; 3905 } 3906 3907 static ssize_t 3908 tracing_max_lat_read(struct file *filp, char __user *ubuf, 3909 size_t cnt, loff_t *ppos) 3910 { 3911 unsigned long *ptr = filp->private_data; 3912 char buf[64]; 3913 int r; 3914 3915 r = snprintf(buf, sizeof(buf), "%ld\n", 3916 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 3917 if (r > sizeof(buf)) 3918 r = sizeof(buf); 3919 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3920 } 3921 3922 static ssize_t 3923 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 3924 size_t cnt, loff_t *ppos) 3925 { 3926 unsigned long *ptr = filp->private_data; 3927 unsigned long val; 3928 int ret; 3929 3930 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3931 if (ret) 3932 return ret; 3933 3934 *ptr = val * 1000; 3935 3936 return cnt; 3937 } 3938 3939 static int tracing_open_pipe(struct inode *inode, struct file *filp) 3940 { 3941 struct trace_array *tr = inode->i_private; 3942 struct trace_iterator *iter; 3943 int ret = 0; 3944 3945 if (tracing_disabled) 3946 return -ENODEV; 3947 3948 if (trace_array_get(tr) < 0) 3949 return -ENODEV; 3950 3951 mutex_lock(&trace_types_lock); 3952 3953 /* create a buffer to store the information to pass to userspace */ 3954 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3955 if (!iter) { 3956 ret = -ENOMEM; 3957 __trace_array_put(tr); 3958 goto out; 3959 } 3960 3961 /* 3962 * We make a copy of the current tracer to avoid concurrent 3963 * changes on it while we are reading. 3964 */ 3965 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 3966 if (!iter->trace) { 3967 ret = -ENOMEM; 3968 goto fail; 3969 } 3970 *iter->trace = *tr->current_trace; 3971 3972 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3973 ret = -ENOMEM; 3974 goto fail; 3975 } 3976 3977 /* trace pipe does not show start of buffer */ 3978 cpumask_setall(iter->started); 3979 3980 if (trace_flags & TRACE_ITER_LATENCY_FMT) 3981 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3982 3983 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
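 * (For example, the "counter" clock is just an increment rather than
 * a count of nanoseconds, so its raw values are shown unscaled, while
 * the "local" and "global" clocks do count in nanoseconds.)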
*/ 3984 if (trace_clocks[tr->clock_id].in_ns) 3985 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3986 3987 iter->tr = tr; 3988 iter->trace_buffer = &tr->trace_buffer; 3989 iter->cpu_file = tracing_get_cpu(inode); 3990 mutex_init(&iter->mutex); 3991 filp->private_data = iter; 3992 3993 if (iter->trace->pipe_open) 3994 iter->trace->pipe_open(iter); 3995 3996 nonseekable_open(inode, filp); 3997 out: 3998 mutex_unlock(&trace_types_lock); 3999 return ret; 4000 4001 fail: 4002 kfree(iter->trace); 4003 kfree(iter); 4004 __trace_array_put(tr); 4005 mutex_unlock(&trace_types_lock); 4006 return ret; 4007 } 4008 4009 static int tracing_release_pipe(struct inode *inode, struct file *file) 4010 { 4011 struct trace_iterator *iter = file->private_data; 4012 struct trace_array *tr = inode->i_private; 4013 4014 mutex_lock(&trace_types_lock); 4015 4016 if (iter->trace->pipe_close) 4017 iter->trace->pipe_close(iter); 4018 4019 mutex_unlock(&trace_types_lock); 4020 4021 free_cpumask_var(iter->started); 4022 mutex_destroy(&iter->mutex); 4023 kfree(iter->trace); 4024 kfree(iter); 4025 4026 trace_array_put(tr); 4027 4028 return 0; 4029 } 4030 4031 static unsigned int 4032 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 4033 { 4034 /* Iterators are static, they should be filled or empty */ 4035 if (trace_buffer_iter(iter, iter->cpu_file)) 4036 return POLLIN | POLLRDNORM; 4037 4038 if (trace_flags & TRACE_ITER_BLOCK) 4039 /* 4040 * Always select as readable when in blocking mode 4041 */ 4042 return POLLIN | POLLRDNORM; 4043 else 4044 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, 4045 filp, poll_table); 4046 } 4047 4048 static unsigned int 4049 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 4050 { 4051 struct trace_iterator *iter = filp->private_data; 4052 4053 return trace_poll(iter, filp, poll_table); 4054 } 4055 4056 /* 4057 * This is a make-shift waitqueue. 4058 * A tracer might use this callback on some rare cases: 4059 * 4060 * 1) the current tracer might hold the runqueue lock when it wakes up 4061 * a reader, hence a deadlock (sched, function, and function graph tracers) 4062 * 2) the function tracers, trace all functions, we don't want 4063 * the overhead of calling wake_up and friends 4064 * (and tracing them too) 4065 * 4066 * Anyway, this is really very primitive wakeup. 4067 */ 4068 void poll_wait_pipe(struct trace_iterator *iter) 4069 { 4070 set_current_state(TASK_INTERRUPTIBLE); 4071 /* sleep for 100 msecs, and try again. */ 4072 schedule_timeout(HZ / 10); 4073 } 4074 4075 /* Must be called with trace_types_lock mutex held. */ 4076 static int tracing_wait_pipe(struct file *filp) 4077 { 4078 struct trace_iterator *iter = filp->private_data; 4079 4080 while (trace_empty(iter)) { 4081 4082 if ((filp->f_flags & O_NONBLOCK)) { 4083 return -EAGAIN; 4084 } 4085 4086 mutex_unlock(&iter->mutex); 4087 4088 iter->trace->wait_pipe(iter); 4089 4090 mutex_lock(&iter->mutex); 4091 4092 if (signal_pending(current)) 4093 return -EINTR; 4094 4095 /* 4096 * We block until we read something and tracing is disabled. 4097 * We still block if tracing is disabled, but we have never 4098 * read anything. This allows a user to cat this file, and 4099 * then enable tracing. But after we have read something, 4100 * we give an EOF when tracing is again disabled. 4101 * 4102 * iter->pos will be 0 if we haven't read anything. 4103 */ 4104 if (!tracing_is_on() && iter->pos) 4105 break; 4106 } 4107 4108 return 1; 4109 } 4110 4111 /* 4112 * Consumer reader. 
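 * Unlike the "trace" file, a read here removes what it returns from
 * the ring buffer (see the trace_consume() call below), and an empty
 * buffer normally blocks the reader in tracing_wait_pipe() rather
 * than returning EOF straight away.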
4113 */ 4114 static ssize_t 4115 tracing_read_pipe(struct file *filp, char __user *ubuf, 4116 size_t cnt, loff_t *ppos) 4117 { 4118 struct trace_iterator *iter = filp->private_data; 4119 struct trace_array *tr = iter->tr; 4120 ssize_t sret; 4121 4122 /* return any leftover data */ 4123 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4124 if (sret != -EBUSY) 4125 return sret; 4126 4127 trace_seq_init(&iter->seq); 4128 4129 /* copy the tracer to avoid using a global lock all around */ 4130 mutex_lock(&trace_types_lock); 4131 if (unlikely(iter->trace->name != tr->current_trace->name)) 4132 *iter->trace = *tr->current_trace; 4133 mutex_unlock(&trace_types_lock); 4134 4135 /* 4136 * Avoid more than one consumer on a single file descriptor 4137 * This is just a matter of traces coherency, the ring buffer itself 4138 * is protected. 4139 */ 4140 mutex_lock(&iter->mutex); 4141 if (iter->trace->read) { 4142 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 4143 if (sret) 4144 goto out; 4145 } 4146 4147 waitagain: 4148 sret = tracing_wait_pipe(filp); 4149 if (sret <= 0) 4150 goto out; 4151 4152 /* stop when tracing is finished */ 4153 if (trace_empty(iter)) { 4154 sret = 0; 4155 goto out; 4156 } 4157 4158 if (cnt >= PAGE_SIZE) 4159 cnt = PAGE_SIZE - 1; 4160 4161 /* reset all but tr, trace, and overruns */ 4162 memset(&iter->seq, 0, 4163 sizeof(struct trace_iterator) - 4164 offsetof(struct trace_iterator, seq)); 4165 cpumask_clear(iter->started); 4166 iter->pos = -1; 4167 4168 trace_event_read_lock(); 4169 trace_access_lock(iter->cpu_file); 4170 while (trace_find_next_entry_inc(iter) != NULL) { 4171 enum print_line_t ret; 4172 int len = iter->seq.len; 4173 4174 ret = print_trace_line(iter); 4175 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4176 /* don't print partial lines */ 4177 iter->seq.len = len; 4178 break; 4179 } 4180 if (ret != TRACE_TYPE_NO_CONSUME) 4181 trace_consume(iter); 4182 4183 if (iter->seq.len >= cnt) 4184 break; 4185 4186 /* 4187 * Setting the full flag means we reached the trace_seq buffer 4188 * size and we should leave by partial output condition above. 4189 * One of the trace_seq_* functions is not used properly. 4190 */ 4191 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 4192 iter->ent->type); 4193 } 4194 trace_access_unlock(iter->cpu_file); 4195 trace_event_read_unlock(); 4196 4197 /* Now copy what we have to the user */ 4198 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4199 if (iter->seq.readpos >= iter->seq.len) 4200 trace_seq_init(&iter->seq); 4201 4202 /* 4203 * If there was nothing to send to user, in spite of consuming trace 4204 * entries, go back to wait for more entries. 
4205 */ 4206 if (sret == -EBUSY) 4207 goto waitagain; 4208 4209 out: 4210 mutex_unlock(&iter->mutex); 4211 4212 return sret; 4213 } 4214 4215 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 4216 struct pipe_buffer *buf) 4217 { 4218 __free_page(buf->page); 4219 } 4220 4221 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 4222 unsigned int idx) 4223 { 4224 __free_page(spd->pages[idx]); 4225 } 4226 4227 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 4228 .can_merge = 0, 4229 .map = generic_pipe_buf_map, 4230 .unmap = generic_pipe_buf_unmap, 4231 .confirm = generic_pipe_buf_confirm, 4232 .release = tracing_pipe_buf_release, 4233 .steal = generic_pipe_buf_steal, 4234 .get = generic_pipe_buf_get, 4235 }; 4236 4237 static size_t 4238 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 4239 { 4240 size_t count; 4241 int ret; 4242 4243 /* Seq buffer is page-sized, exactly what we need. */ 4244 for (;;) { 4245 count = iter->seq.len; 4246 ret = print_trace_line(iter); 4247 count = iter->seq.len - count; 4248 if (rem < count) { 4249 rem = 0; 4250 iter->seq.len -= count; 4251 break; 4252 } 4253 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4254 iter->seq.len -= count; 4255 break; 4256 } 4257 4258 if (ret != TRACE_TYPE_NO_CONSUME) 4259 trace_consume(iter); 4260 rem -= count; 4261 if (!trace_find_next_entry_inc(iter)) { 4262 rem = 0; 4263 iter->ent = NULL; 4264 break; 4265 } 4266 } 4267 4268 return rem; 4269 } 4270 4271 static ssize_t tracing_splice_read_pipe(struct file *filp, 4272 loff_t *ppos, 4273 struct pipe_inode_info *pipe, 4274 size_t len, 4275 unsigned int flags) 4276 { 4277 struct page *pages_def[PIPE_DEF_BUFFERS]; 4278 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 4279 struct trace_iterator *iter = filp->private_data; 4280 struct splice_pipe_desc spd = { 4281 .pages = pages_def, 4282 .partial = partial_def, 4283 .nr_pages = 0, /* This gets updated below. */ 4284 .nr_pages_max = PIPE_DEF_BUFFERS, 4285 .flags = flags, 4286 .ops = &tracing_pipe_buf_ops, 4287 .spd_release = tracing_spd_release_pipe, 4288 }; 4289 struct trace_array *tr = iter->tr; 4290 ssize_t ret; 4291 size_t rem; 4292 unsigned int i; 4293 4294 if (splice_grow_spd(pipe, &spd)) 4295 return -ENOMEM; 4296 4297 /* copy the tracer to avoid using a global lock all around */ 4298 mutex_lock(&trace_types_lock); 4299 if (unlikely(iter->trace->name != tr->current_trace->name)) 4300 *iter->trace = *tr->current_trace; 4301 mutex_unlock(&trace_types_lock); 4302 4303 mutex_lock(&iter->mutex); 4304 4305 if (iter->trace->splice_read) { 4306 ret = iter->trace->splice_read(iter, filp, 4307 ppos, pipe, len, flags); 4308 if (ret) 4309 goto out_err; 4310 } 4311 4312 ret = tracing_wait_pipe(filp); 4313 if (ret <= 0) 4314 goto out_err; 4315 4316 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 4317 ret = -EFAULT; 4318 goto out_err; 4319 } 4320 4321 trace_event_read_lock(); 4322 trace_access_lock(iter->cpu_file); 4323 4324 /* Fill as many pages as possible. */ 4325 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 4326 spd.pages[i] = alloc_page(GFP_KERNEL); 4327 if (!spd.pages[i]) 4328 break; 4329 4330 rem = tracing_fill_pipe_page(rem, iter); 4331 4332 /* Copy the data into the page, so we can start over. 
*/ 4333 ret = trace_seq_to_buffer(&iter->seq, 4334 page_address(spd.pages[i]), 4335 iter->seq.len); 4336 if (ret < 0) { 4337 __free_page(spd.pages[i]); 4338 break; 4339 } 4340 spd.partial[i].offset = 0; 4341 spd.partial[i].len = iter->seq.len; 4342 4343 trace_seq_init(&iter->seq); 4344 } 4345 4346 trace_access_unlock(iter->cpu_file); 4347 trace_event_read_unlock(); 4348 mutex_unlock(&iter->mutex); 4349 4350 spd.nr_pages = i; 4351 4352 ret = splice_to_pipe(pipe, &spd); 4353 out: 4354 splice_shrink_spd(&spd); 4355 return ret; 4356 4357 out_err: 4358 mutex_unlock(&iter->mutex); 4359 goto out; 4360 } 4361 4362 static ssize_t 4363 tracing_entries_read(struct file *filp, char __user *ubuf, 4364 size_t cnt, loff_t *ppos) 4365 { 4366 struct inode *inode = file_inode(filp); 4367 struct trace_array *tr = inode->i_private; 4368 int cpu = tracing_get_cpu(inode); 4369 char buf[64]; 4370 int r = 0; 4371 ssize_t ret; 4372 4373 mutex_lock(&trace_types_lock); 4374 4375 if (cpu == RING_BUFFER_ALL_CPUS) { 4376 int cpu, buf_size_same; 4377 unsigned long size; 4378 4379 size = 0; 4380 buf_size_same = 1; 4381 /* check if all cpu sizes are same */ 4382 for_each_tracing_cpu(cpu) { 4383 /* fill in the size from first enabled cpu */ 4384 if (size == 0) 4385 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; 4386 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { 4387 buf_size_same = 0; 4388 break; 4389 } 4390 } 4391 4392 if (buf_size_same) { 4393 if (!ring_buffer_expanded) 4394 r = sprintf(buf, "%lu (expanded: %lu)\n", 4395 size >> 10, 4396 trace_buf_size >> 10); 4397 else 4398 r = sprintf(buf, "%lu\n", size >> 10); 4399 } else 4400 r = sprintf(buf, "X\n"); 4401 } else 4402 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); 4403 4404 mutex_unlock(&trace_types_lock); 4405 4406 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4407 return ret; 4408 } 4409 4410 static ssize_t 4411 tracing_entries_write(struct file *filp, const char __user *ubuf, 4412 size_t cnt, loff_t *ppos) 4413 { 4414 struct inode *inode = file_inode(filp); 4415 struct trace_array *tr = inode->i_private; 4416 unsigned long val; 4417 int ret; 4418 4419 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4420 if (ret) 4421 return ret; 4422 4423 /* must have at least 1 entry */ 4424 if (!val) 4425 return -EINVAL; 4426 4427 /* value is in KB */ 4428 val <<= 10; 4429 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 4430 if (ret < 0) 4431 return ret; 4432 4433 *ppos += cnt; 4434 4435 return cnt; 4436 } 4437 4438 static ssize_t 4439 tracing_total_entries_read(struct file *filp, char __user *ubuf, 4440 size_t cnt, loff_t *ppos) 4441 { 4442 struct trace_array *tr = filp->private_data; 4443 char buf[64]; 4444 int r, cpu; 4445 unsigned long size = 0, expanded_size = 0; 4446 4447 mutex_lock(&trace_types_lock); 4448 for_each_tracing_cpu(cpu) { 4449 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; 4450 if (!ring_buffer_expanded) 4451 expanded_size += trace_buf_size >> 10; 4452 } 4453 if (ring_buffer_expanded) 4454 r = sprintf(buf, "%lu\n", size); 4455 else 4456 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 4457 mutex_unlock(&trace_types_lock); 4458 4459 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4460 } 4461 4462 static ssize_t 4463 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 4464 size_t cnt, loff_t *ppos) 4465 { 4466 /* 4467 * There is no need to read what the user has written, this function 4468 * is just to make sure 
that there is no error when "echo" is used. 4469 */ 4470 4471 *ppos += cnt; 4472 4473 return cnt; 4474 } 4475 4476 static int 4477 tracing_free_buffer_release(struct inode *inode, struct file *filp) 4478 { 4479 struct trace_array *tr = inode->i_private; 4480 4481 /* disable tracing? */ 4482 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 4483 tracer_tracing_off(tr); 4484 /* resize the ring buffer to 0 */ 4485 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 4486 4487 trace_array_put(tr); 4488 4489 return 0; 4490 } 4491 4492 static ssize_t 4493 tracing_mark_write(struct file *filp, const char __user *ubuf, 4494 size_t cnt, loff_t *fpos) 4495 { 4496 unsigned long addr = (unsigned long)ubuf; 4497 struct trace_array *tr = filp->private_data; 4498 struct ring_buffer_event *event; 4499 struct ring_buffer *buffer; 4500 struct print_entry *entry; 4501 unsigned long irq_flags; 4502 struct page *pages[2]; 4503 void *map_page[2]; 4504 int nr_pages = 1; 4505 ssize_t written; 4506 int offset; 4507 int size; 4508 int len; 4509 int ret; 4510 int i; 4511 4512 if (tracing_disabled) 4513 return -EINVAL; 4514 4515 if (!(trace_flags & TRACE_ITER_MARKERS)) 4516 return -EINVAL; 4517 4518 if (cnt > TRACE_BUF_SIZE) 4519 cnt = TRACE_BUF_SIZE; 4520 4521 /* 4522 * Userspace is injecting traces into the kernel trace buffer. 4523 * We want to be as non-intrusive as possible. 4524 * To do so, we do not want to allocate any special buffers 4525 * or take any locks, but instead write the userspace data 4526 * straight into the ring buffer. 4527 * 4528 * First we need to pin the userspace buffer into memory. 4529 * Most likely it already is, since the writer just referenced it, 4530 * but there is no guarantee. By using get_user_pages_fast() 4531 * and kmap_atomic/kunmap_atomic() we can get access to the 4532 * pages directly. We then write the data directly into the 4533 * ring buffer.
4534 */ 4535 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 4536 4537 /* check if we cross pages */ 4538 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) 4539 nr_pages = 2; 4540 4541 offset = addr & (PAGE_SIZE - 1); 4542 addr &= PAGE_MASK; 4543 4544 ret = get_user_pages_fast(addr, nr_pages, 0, pages); 4545 if (ret < nr_pages) { 4546 while (--ret >= 0) 4547 put_page(pages[ret]); 4548 written = -EFAULT; 4549 goto out; 4550 } 4551 4552 for (i = 0; i < nr_pages; i++) 4553 map_page[i] = kmap_atomic(pages[i]); 4554 4555 local_save_flags(irq_flags); 4556 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 4557 buffer = tr->trace_buffer.buffer; 4558 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 4559 irq_flags, preempt_count()); 4560 if (!event) { 4561 /* Ring buffer disabled, return as if not open for write */ 4562 written = -EBADF; 4563 goto out_unlock; 4564 } 4565 4566 entry = ring_buffer_event_data(event); 4567 entry->ip = _THIS_IP_; 4568 4569 if (nr_pages == 2) { 4570 len = PAGE_SIZE - offset; 4571 memcpy(&entry->buf, map_page[0] + offset, len); 4572 memcpy(&entry->buf[len], map_page[1], cnt - len); 4573 } else 4574 memcpy(&entry->buf, map_page[0] + offset, cnt); 4575 4576 if (entry->buf[cnt - 1] != '\n') { 4577 entry->buf[cnt] = '\n'; 4578 entry->buf[cnt + 1] = '\0'; 4579 } else 4580 entry->buf[cnt] = '\0'; 4581 4582 __buffer_unlock_commit(buffer, event); 4583 4584 written = cnt; 4585 4586 *fpos += written; 4587 4588 out_unlock: 4589 for (i = 0; i < nr_pages; i++){ 4590 kunmap_atomic(map_page[i]); 4591 put_page(pages[i]); 4592 } 4593 out: 4594 return written; 4595 } 4596 4597 static int tracing_clock_show(struct seq_file *m, void *v) 4598 { 4599 struct trace_array *tr = m->private; 4600 int i; 4601 4602 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 4603 seq_printf(m, 4604 "%s%s%s%s", i ? " " : "", 4605 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 4606 i == tr->clock_id ? "]" : ""); 4607 seq_putc(m, '\n'); 4608 4609 return 0; 4610 } 4611 4612 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4613 size_t cnt, loff_t *fpos) 4614 { 4615 struct seq_file *m = filp->private_data; 4616 struct trace_array *tr = m->private; 4617 char buf[64]; 4618 const char *clockstr; 4619 int i; 4620 4621 if (cnt >= sizeof(buf)) 4622 return -EINVAL; 4623 4624 if (copy_from_user(&buf, ubuf, cnt)) 4625 return -EFAULT; 4626 4627 buf[cnt] = 0; 4628 4629 clockstr = strstrip(buf); 4630 4631 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4632 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4633 break; 4634 } 4635 if (i == ARRAY_SIZE(trace_clocks)) 4636 return -EINVAL; 4637 4638 mutex_lock(&trace_types_lock); 4639 4640 tr->clock_id = i; 4641 4642 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); 4643 4644 /* 4645 * New clock may not be consistent with the previous clock. 4646 * Reset the buffer so that it doesn't have incomparable timestamps. 
4647 */ 4648 tracing_reset_online_cpus(&tr->trace_buffer); 4649 4650 #ifdef CONFIG_TRACER_MAX_TRACE 4651 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) 4652 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 4653 tracing_reset_online_cpus(&tr->max_buffer); 4654 #endif 4655 4656 mutex_unlock(&trace_types_lock); 4657 4658 *fpos += cnt; 4659 4660 return cnt; 4661 } 4662 4663 static int tracing_clock_open(struct inode *inode, struct file *file) 4664 { 4665 struct trace_array *tr = inode->i_private; 4666 int ret; 4667 4668 if (tracing_disabled) 4669 return -ENODEV; 4670 4671 if (trace_array_get(tr)) 4672 return -ENODEV; 4673 4674 ret = single_open(file, tracing_clock_show, inode->i_private); 4675 if (ret < 0) 4676 trace_array_put(tr); 4677 4678 return ret; 4679 } 4680 4681 struct ftrace_buffer_info { 4682 struct trace_iterator iter; 4683 void *spare; 4684 unsigned int read; 4685 }; 4686 4687 #ifdef CONFIG_TRACER_SNAPSHOT 4688 static int tracing_snapshot_open(struct inode *inode, struct file *file) 4689 { 4690 struct trace_array *tr = inode->i_private; 4691 struct trace_iterator *iter; 4692 struct seq_file *m; 4693 int ret = 0; 4694 4695 if (trace_array_get(tr) < 0) 4696 return -ENODEV; 4697 4698 if (file->f_mode & FMODE_READ) { 4699 iter = __tracing_open(inode, file, true); 4700 if (IS_ERR(iter)) 4701 ret = PTR_ERR(iter); 4702 } else { 4703 /* Writes still need the seq_file to hold the private data */ 4704 ret = -ENOMEM; 4705 m = kzalloc(sizeof(*m), GFP_KERNEL); 4706 if (!m) 4707 goto out; 4708 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4709 if (!iter) { 4710 kfree(m); 4711 goto out; 4712 } 4713 ret = 0; 4714 4715 iter->tr = tr; 4716 iter->trace_buffer = &tr->max_buffer; 4717 iter->cpu_file = tracing_get_cpu(inode); 4718 m->private = iter; 4719 file->private_data = m; 4720 } 4721 out: 4722 if (ret < 0) 4723 trace_array_put(tr); 4724 4725 return ret; 4726 } 4727 4728 static ssize_t 4729 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4730 loff_t *ppos) 4731 { 4732 struct seq_file *m = filp->private_data; 4733 struct trace_iterator *iter = m->private; 4734 struct trace_array *tr = iter->tr; 4735 unsigned long val; 4736 int ret; 4737 4738 ret = tracing_update_buffers(); 4739 if (ret < 0) 4740 return ret; 4741 4742 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4743 if (ret) 4744 return ret; 4745 4746 mutex_lock(&trace_types_lock); 4747 4748 if (tr->current_trace->use_max_tr) { 4749 ret = -EBUSY; 4750 goto out; 4751 } 4752 4753 switch (val) { 4754 case 0: 4755 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4756 ret = -EINVAL; 4757 break; 4758 } 4759 if (tr->allocated_snapshot) 4760 free_snapshot(tr); 4761 break; 4762 case 1: 4763 /* Only allow per-cpu swap if the ring buffer supports it */ 4764 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 4765 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4766 ret = -EINVAL; 4767 break; 4768 } 4769 #endif 4770 if (!tr->allocated_snapshot) { 4771 ret = alloc_snapshot(tr); 4772 if (ret < 0) 4773 break; 4774 } 4775 local_irq_disable(); 4776 /* Now, we're going to swap */ 4777 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4778 update_max_tr(tr, current, smp_processor_id()); 4779 else 4780 update_max_tr_single(tr, current, iter->cpu_file); 4781 local_irq_enable(); 4782 break; 4783 default: 4784 if (tr->allocated_snapshot) { 4785 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4786 tracing_reset_online_cpus(&tr->max_buffer); 4787 else 4788 tracing_reset(&tr->max_buffer, iter->cpu_file); 4789 } 4790 break; 4791 } 4792 
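	/*
	 * For reference, a sketch of the user-visible interface handled by the
	 * switch above (writes to the "snapshot" file; paths assume debugfs is
	 * mounted at /sys/kernel/debug):
	 *
	 *	echo 0 > /sys/kernel/debug/tracing/snapshot	# free the snapshot buffer
	 *	echo 1 > /sys/kernel/debug/tracing/snapshot	# allocate if needed and swap in a snapshot
	 *	echo 2 > /sys/kernel/debug/tracing/snapshot	# any other value: clear the snapshot contents
	 */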
4793 if (ret >= 0) { 4794 *ppos += cnt; 4795 ret = cnt; 4796 } 4797 out: 4798 mutex_unlock(&trace_types_lock); 4799 return ret; 4800 } 4801 4802 static int tracing_snapshot_release(struct inode *inode, struct file *file) 4803 { 4804 struct seq_file *m = file->private_data; 4805 int ret; 4806 4807 ret = tracing_release(inode, file); 4808 4809 if (file->f_mode & FMODE_READ) 4810 return ret; 4811 4812 /* If write only, the seq_file is just a stub */ 4813 if (m) 4814 kfree(m->private); 4815 kfree(m); 4816 4817 return 0; 4818 } 4819 4820 static int tracing_buffers_open(struct inode *inode, struct file *filp); 4821 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 4822 size_t count, loff_t *ppos); 4823 static int tracing_buffers_release(struct inode *inode, struct file *file); 4824 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 4825 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 4826 4827 static int snapshot_raw_open(struct inode *inode, struct file *filp) 4828 { 4829 struct ftrace_buffer_info *info; 4830 int ret; 4831 4832 ret = tracing_buffers_open(inode, filp); 4833 if (ret < 0) 4834 return ret; 4835 4836 info = filp->private_data; 4837 4838 if (info->iter.trace->use_max_tr) { 4839 tracing_buffers_release(inode, filp); 4840 return -EBUSY; 4841 } 4842 4843 info->iter.snapshot = true; 4844 info->iter.trace_buffer = &info->iter.tr->max_buffer; 4845 4846 return ret; 4847 } 4848 4849 #endif /* CONFIG_TRACER_SNAPSHOT */ 4850 4851 4852 static const struct file_operations tracing_max_lat_fops = { 4853 .open = tracing_open_generic, 4854 .read = tracing_max_lat_read, 4855 .write = tracing_max_lat_write, 4856 .llseek = generic_file_llseek, 4857 }; 4858 4859 static const struct file_operations set_tracer_fops = { 4860 .open = tracing_open_generic, 4861 .read = tracing_set_trace_read, 4862 .write = tracing_set_trace_write, 4863 .llseek = generic_file_llseek, 4864 }; 4865 4866 static const struct file_operations tracing_pipe_fops = { 4867 .open = tracing_open_pipe, 4868 .poll = tracing_poll_pipe, 4869 .read = tracing_read_pipe, 4870 .splice_read = tracing_splice_read_pipe, 4871 .release = tracing_release_pipe, 4872 .llseek = no_llseek, 4873 }; 4874 4875 static const struct file_operations tracing_entries_fops = { 4876 .open = tracing_open_generic_tr, 4877 .read = tracing_entries_read, 4878 .write = tracing_entries_write, 4879 .llseek = generic_file_llseek, 4880 .release = tracing_release_generic_tr, 4881 }; 4882 4883 static const struct file_operations tracing_total_entries_fops = { 4884 .open = tracing_open_generic_tr, 4885 .read = tracing_total_entries_read, 4886 .llseek = generic_file_llseek, 4887 .release = tracing_release_generic_tr, 4888 }; 4889 4890 static const struct file_operations tracing_free_buffer_fops = { 4891 .open = tracing_open_generic_tr, 4892 .write = tracing_free_buffer_write, 4893 .release = tracing_free_buffer_release, 4894 }; 4895 4896 static const struct file_operations tracing_mark_fops = { 4897 .open = tracing_open_generic_tr, 4898 .write = tracing_mark_write, 4899 .llseek = generic_file_llseek, 4900 .release = tracing_release_generic_tr, 4901 }; 4902 4903 static const struct file_operations trace_clock_fops = { 4904 .open = tracing_clock_open, 4905 .read = seq_read, 4906 .llseek = seq_lseek, 4907 .release = tracing_single_release_tr, 4908 .write = tracing_clock_write, 4909 }; 4910 4911 #ifdef CONFIG_TRACER_SNAPSHOT 4912 static const struct file_operations snapshot_fops = { 4913 .open = tracing_snapshot_open, 
4914 .read = seq_read, 4915 .write = tracing_snapshot_write, 4916 .llseek = tracing_seek, 4917 .release = tracing_snapshot_release, 4918 }; 4919 4920 static const struct file_operations snapshot_raw_fops = { 4921 .open = snapshot_raw_open, 4922 .read = tracing_buffers_read, 4923 .release = tracing_buffers_release, 4924 .splice_read = tracing_buffers_splice_read, 4925 .llseek = no_llseek, 4926 }; 4927 4928 #endif /* CONFIG_TRACER_SNAPSHOT */ 4929 4930 static int tracing_buffers_open(struct inode *inode, struct file *filp) 4931 { 4932 struct trace_array *tr = inode->i_private; 4933 struct ftrace_buffer_info *info; 4934 int ret; 4935 4936 if (tracing_disabled) 4937 return -ENODEV; 4938 4939 if (trace_array_get(tr) < 0) 4940 return -ENODEV; 4941 4942 info = kzalloc(sizeof(*info), GFP_KERNEL); 4943 if (!info) { 4944 trace_array_put(tr); 4945 return -ENOMEM; 4946 } 4947 4948 mutex_lock(&trace_types_lock); 4949 4950 info->iter.tr = tr; 4951 info->iter.cpu_file = tracing_get_cpu(inode); 4952 info->iter.trace = tr->current_trace; 4953 info->iter.trace_buffer = &tr->trace_buffer; 4954 info->spare = NULL; 4955 /* Force reading ring buffer for first read */ 4956 info->read = (unsigned int)-1; 4957 4958 filp->private_data = info; 4959 4960 mutex_unlock(&trace_types_lock); 4961 4962 ret = nonseekable_open(inode, filp); 4963 if (ret < 0) 4964 trace_array_put(tr); 4965 4966 return ret; 4967 } 4968 4969 static unsigned int 4970 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 4971 { 4972 struct ftrace_buffer_info *info = filp->private_data; 4973 struct trace_iterator *iter = &info->iter; 4974 4975 return trace_poll(iter, filp, poll_table); 4976 } 4977 4978 static ssize_t 4979 tracing_buffers_read(struct file *filp, char __user *ubuf, 4980 size_t count, loff_t *ppos) 4981 { 4982 struct ftrace_buffer_info *info = filp->private_data; 4983 struct trace_iterator *iter = &info->iter; 4984 ssize_t ret; 4985 ssize_t size; 4986 4987 if (!count) 4988 return 0; 4989 4990 mutex_lock(&trace_types_lock); 4991 4992 #ifdef CONFIG_TRACER_MAX_TRACE 4993 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 4994 size = -EBUSY; 4995 goto out_unlock; 4996 } 4997 #endif 4998 4999 if (!info->spare) 5000 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, 5001 iter->cpu_file); 5002 size = -ENOMEM; 5003 if (!info->spare) 5004 goto out_unlock; 5005 5006 /* Do we have previous read data to read? 
*/ 5007 if (info->read < PAGE_SIZE) 5008 goto read; 5009 5010 again: 5011 trace_access_lock(iter->cpu_file); 5012 ret = ring_buffer_read_page(iter->trace_buffer->buffer, 5013 &info->spare, 5014 count, 5015 iter->cpu_file, 0); 5016 trace_access_unlock(iter->cpu_file); 5017 5018 if (ret < 0) { 5019 if (trace_empty(iter)) { 5020 if ((filp->f_flags & O_NONBLOCK)) { 5021 size = -EAGAIN; 5022 goto out_unlock; 5023 } 5024 mutex_unlock(&trace_types_lock); 5025 iter->trace->wait_pipe(iter); 5026 mutex_lock(&trace_types_lock); 5027 if (signal_pending(current)) { 5028 size = -EINTR; 5029 goto out_unlock; 5030 } 5031 goto again; 5032 } 5033 size = 0; 5034 goto out_unlock; 5035 } 5036 5037 info->read = 0; 5038 read: 5039 size = PAGE_SIZE - info->read; 5040 if (size > count) 5041 size = count; 5042 5043 ret = copy_to_user(ubuf, info->spare + info->read, size); 5044 if (ret == size) { 5045 size = -EFAULT; 5046 goto out_unlock; 5047 } 5048 size -= ret; 5049 5050 *ppos += size; 5051 info->read += size; 5052 5053 out_unlock: 5054 mutex_unlock(&trace_types_lock); 5055 5056 return size; 5057 } 5058 5059 static int tracing_buffers_release(struct inode *inode, struct file *file) 5060 { 5061 struct ftrace_buffer_info *info = file->private_data; 5062 struct trace_iterator *iter = &info->iter; 5063 5064 mutex_lock(&trace_types_lock); 5065 5066 __trace_array_put(iter->tr); 5067 5068 if (info->spare) 5069 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); 5070 kfree(info); 5071 5072 mutex_unlock(&trace_types_lock); 5073 5074 return 0; 5075 } 5076 5077 struct buffer_ref { 5078 struct ring_buffer *buffer; 5079 void *page; 5080 int ref; 5081 }; 5082 5083 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 5084 struct pipe_buffer *buf) 5085 { 5086 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5087 5088 if (--ref->ref) 5089 return; 5090 5091 ring_buffer_free_read_page(ref->buffer, ref->page); 5092 kfree(ref); 5093 buf->private = 0; 5094 } 5095 5096 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 5097 struct pipe_buffer *buf) 5098 { 5099 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5100 5101 ref->ref++; 5102 } 5103 5104 /* Pipe buffer operations for a buffer. */ 5105 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 5106 .can_merge = 0, 5107 .map = generic_pipe_buf_map, 5108 .unmap = generic_pipe_buf_unmap, 5109 .confirm = generic_pipe_buf_confirm, 5110 .release = buffer_pipe_buf_release, 5111 .steal = generic_pipe_buf_steal, 5112 .get = buffer_pipe_buf_get, 5113 }; 5114 5115 /* 5116 * Callback from splice_to_pipe(), if we need to release some pages 5117 * at the end of the spd in case we error'ed out in filling the pipe. 
5118 */ 5119 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 5120 { 5121 struct buffer_ref *ref = 5122 (struct buffer_ref *)spd->partial[i].private; 5123 5124 if (--ref->ref) 5125 return; 5126 5127 ring_buffer_free_read_page(ref->buffer, ref->page); 5128 kfree(ref); 5129 spd->partial[i].private = 0; 5130 } 5131 5132 static ssize_t 5133 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 5134 struct pipe_inode_info *pipe, size_t len, 5135 unsigned int flags) 5136 { 5137 struct ftrace_buffer_info *info = file->private_data; 5138 struct trace_iterator *iter = &info->iter; 5139 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 5140 struct page *pages_def[PIPE_DEF_BUFFERS]; 5141 struct splice_pipe_desc spd = { 5142 .pages = pages_def, 5143 .partial = partial_def, 5144 .nr_pages_max = PIPE_DEF_BUFFERS, 5145 .flags = flags, 5146 .ops = &buffer_pipe_buf_ops, 5147 .spd_release = buffer_spd_release, 5148 }; 5149 struct buffer_ref *ref; 5150 int entries, size, i; 5151 ssize_t ret; 5152 5153 mutex_lock(&trace_types_lock); 5154 5155 #ifdef CONFIG_TRACER_MAX_TRACE 5156 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 5157 ret = -EBUSY; 5158 goto out; 5159 } 5160 #endif 5161 5162 if (splice_grow_spd(pipe, &spd)) { 5163 ret = -ENOMEM; 5164 goto out; 5165 } 5166 5167 if (*ppos & (PAGE_SIZE - 1)) { 5168 ret = -EINVAL; 5169 goto out; 5170 } 5171 5172 if (len & (PAGE_SIZE - 1)) { 5173 if (len < PAGE_SIZE) { 5174 ret = -EINVAL; 5175 goto out; 5176 } 5177 len &= PAGE_MASK; 5178 } 5179 5180 again: 5181 trace_access_lock(iter->cpu_file); 5182 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5183 5184 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { 5185 struct page *page; 5186 int r; 5187 5188 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 5189 if (!ref) 5190 break; 5191 5192 ref->ref = 1; 5193 ref->buffer = iter->trace_buffer->buffer; 5194 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 5195 if (!ref->page) { 5196 kfree(ref); 5197 break; 5198 } 5199 5200 r = ring_buffer_read_page(ref->buffer, &ref->page, 5201 len, iter->cpu_file, 1); 5202 if (r < 0) { 5203 ring_buffer_free_read_page(ref->buffer, ref->page); 5204 kfree(ref); 5205 break; 5206 } 5207 5208 /* 5209 * zero out any left over data, this is going to 5210 * user land. 5211 */ 5212 size = ring_buffer_page_len(ref->page); 5213 if (size < PAGE_SIZE) 5214 memset(ref->page + size, 0, PAGE_SIZE - size); 5215 5216 page = virt_to_page(ref->page); 5217 5218 spd.pages[i] = page; 5219 spd.partial[i].len = PAGE_SIZE; 5220 spd.partial[i].offset = 0; 5221 spd.partial[i].private = (unsigned long)ref; 5222 spd.nr_pages++; 5223 *ppos += PAGE_SIZE; 5224 5225 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5226 } 5227 5228 trace_access_unlock(iter->cpu_file); 5229 spd.nr_pages = i; 5230 5231 /* did we read anything? 
*/ 5232 if (!spd.nr_pages) { 5233 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { 5234 ret = -EAGAIN; 5235 goto out; 5236 } 5237 mutex_unlock(&trace_types_lock); 5238 iter->trace->wait_pipe(iter); 5239 mutex_lock(&trace_types_lock); 5240 if (signal_pending(current)) { 5241 ret = -EINTR; 5242 goto out; 5243 } 5244 goto again; 5245 } 5246 5247 ret = splice_to_pipe(pipe, &spd); 5248 splice_shrink_spd(&spd); 5249 out: 5250 mutex_unlock(&trace_types_lock); 5251 5252 return ret; 5253 } 5254 5255 static const struct file_operations tracing_buffers_fops = { 5256 .open = tracing_buffers_open, 5257 .read = tracing_buffers_read, 5258 .poll = tracing_buffers_poll, 5259 .release = tracing_buffers_release, 5260 .splice_read = tracing_buffers_splice_read, 5261 .llseek = no_llseek, 5262 }; 5263 5264 static ssize_t 5265 tracing_stats_read(struct file *filp, char __user *ubuf, 5266 size_t count, loff_t *ppos) 5267 { 5268 struct inode *inode = file_inode(filp); 5269 struct trace_array *tr = inode->i_private; 5270 struct trace_buffer *trace_buf = &tr->trace_buffer; 5271 int cpu = tracing_get_cpu(inode); 5272 struct trace_seq *s; 5273 unsigned long cnt; 5274 unsigned long long t; 5275 unsigned long usec_rem; 5276 5277 s = kmalloc(sizeof(*s), GFP_KERNEL); 5278 if (!s) 5279 return -ENOMEM; 5280 5281 trace_seq_init(s); 5282 5283 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 5284 trace_seq_printf(s, "entries: %ld\n", cnt); 5285 5286 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 5287 trace_seq_printf(s, "overrun: %ld\n", cnt); 5288 5289 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 5290 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 5291 5292 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 5293 trace_seq_printf(s, "bytes: %ld\n", cnt); 5294 5295 if (trace_clocks[tr->clock_id].in_ns) { 5296 /* local or global for trace_clock */ 5297 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5298 usec_rem = do_div(t, USEC_PER_SEC); 5299 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 5300 t, usec_rem); 5301 5302 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5303 usec_rem = do_div(t, USEC_PER_SEC); 5304 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 5305 } else { 5306 /* counter or tsc mode for trace_clock */ 5307 trace_seq_printf(s, "oldest event ts: %llu\n", 5308 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5309 5310 trace_seq_printf(s, "now ts: %llu\n", 5311 ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5312 } 5313 5314 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 5315 trace_seq_printf(s, "dropped events: %ld\n", cnt); 5316 5317 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 5318 trace_seq_printf(s, "read events: %ld\n", cnt); 5319 5320 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 5321 5322 kfree(s); 5323 5324 return count; 5325 } 5326 5327 static const struct file_operations tracing_stats_fops = { 5328 .open = tracing_open_generic_tr, 5329 .read = tracing_stats_read, 5330 .llseek = generic_file_llseek, 5331 .release = tracing_release_generic_tr, 5332 }; 5333 5334 #ifdef CONFIG_DYNAMIC_FTRACE 5335 5336 int __weak ftrace_arch_read_dyn_info(char *buf, int size) 5337 { 5338 return 0; 5339 } 5340 5341 static ssize_t 5342 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 5343 size_t cnt, loff_t *ppos) 5344 { 5345 static char ftrace_dyn_info_buffer[1024]; 5346 static DEFINE_MUTEX(dyn_info_mutex); 5347 unsigned long *p = filp->private_data; 
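	/*
	 * Note: p points at ftrace_update_tot_cnt (wired up for the
	 * "dyn_ftrace_total_info" file in tracer_init_debugfs() below), so a
	 * read prints that count followed by optional arch-specific info,
	 * e.g. (sketch):
	 *
	 *	cat /sys/kernel/debug/tracing/dyn_ftrace_total_info
	 */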
5348 char *buf = ftrace_dyn_info_buffer; 5349 int size = ARRAY_SIZE(ftrace_dyn_info_buffer); 5350 int r; 5351 5352 mutex_lock(&dyn_info_mutex); 5353 r = sprintf(buf, "%ld ", *p); 5354 5355 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); 5356 buf[r++] = '\n'; 5357 5358 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5359 5360 mutex_unlock(&dyn_info_mutex); 5361 5362 return r; 5363 } 5364 5365 static const struct file_operations tracing_dyn_info_fops = { 5366 .open = tracing_open_generic, 5367 .read = tracing_read_dyn_info, 5368 .llseek = generic_file_llseek, 5369 }; 5370 #endif /* CONFIG_DYNAMIC_FTRACE */ 5371 5372 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 5373 static void 5374 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5375 { 5376 tracing_snapshot(); 5377 } 5378 5379 static void 5380 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5381 { 5382 unsigned long *count = (long *)data; 5383 5384 if (!*count) 5385 return; 5386 5387 if (*count != -1) 5388 (*count)--; 5389 5390 tracing_snapshot(); 5391 } 5392 5393 static int 5394 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 5395 struct ftrace_probe_ops *ops, void *data) 5396 { 5397 long count = (long)data; 5398 5399 seq_printf(m, "%ps:", (void *)ip); 5400 5401 seq_printf(m, "snapshot"); 5402 5403 if (count == -1) 5404 seq_printf(m, ":unlimited\n"); 5405 else 5406 seq_printf(m, ":count=%ld\n", count); 5407 5408 return 0; 5409 } 5410 5411 static struct ftrace_probe_ops snapshot_probe_ops = { 5412 .func = ftrace_snapshot, 5413 .print = ftrace_snapshot_print, 5414 }; 5415 5416 static struct ftrace_probe_ops snapshot_count_probe_ops = { 5417 .func = ftrace_count_snapshot, 5418 .print = ftrace_snapshot_print, 5419 }; 5420 5421 static int 5422 ftrace_trace_snapshot_callback(struct ftrace_hash *hash, 5423 char *glob, char *cmd, char *param, int enable) 5424 { 5425 struct ftrace_probe_ops *ops; 5426 void *count = (void *)-1; 5427 char *number; 5428 int ret; 5429 5430 /* hash funcs only work with set_ftrace_filter */ 5431 if (!enable) 5432 return -EINVAL; 5433 5434 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 5435 5436 if (glob[0] == '!') { 5437 unregister_ftrace_function_probe_func(glob+1, ops); 5438 return 0; 5439 } 5440 5441 if (!param) 5442 goto out_reg; 5443 5444 number = strsep(¶m, ":"); 5445 5446 if (!strlen(number)) 5447 goto out_reg; 5448 5449 /* 5450 * We use the callback data field (which is a pointer) 5451 * as our counter. 5452 */ 5453 ret = kstrtoul(number, 0, (unsigned long *)&count); 5454 if (ret) 5455 return ret; 5456 5457 out_reg: 5458 ret = register_ftrace_function_probe(glob, ops, count); 5459 5460 if (ret >= 0) 5461 alloc_snapshot(&global_trace); 5462 5463 return ret < 0 ? 
ret : 0; 5464 } 5465 5466 static struct ftrace_func_command ftrace_snapshot_cmd = { 5467 .name = "snapshot", 5468 .func = ftrace_trace_snapshot_callback, 5469 }; 5470 5471 static __init int register_snapshot_cmd(void) 5472 { 5473 return register_ftrace_command(&ftrace_snapshot_cmd); 5474 } 5475 #else 5476 static inline __init int register_snapshot_cmd(void) { return 0; } 5477 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 5478 5479 struct dentry *tracing_init_dentry_tr(struct trace_array *tr) 5480 { 5481 if (tr->dir) 5482 return tr->dir; 5483 5484 if (!debugfs_initialized()) 5485 return NULL; 5486 5487 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 5488 tr->dir = debugfs_create_dir("tracing", NULL); 5489 5490 if (!tr->dir) 5491 pr_warn_once("Could not create debugfs directory 'tracing'\n"); 5492 5493 return tr->dir; 5494 } 5495 5496 struct dentry *tracing_init_dentry(void) 5497 { 5498 return tracing_init_dentry_tr(&global_trace); 5499 } 5500 5501 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 5502 { 5503 struct dentry *d_tracer; 5504 5505 if (tr->percpu_dir) 5506 return tr->percpu_dir; 5507 5508 d_tracer = tracing_init_dentry_tr(tr); 5509 if (!d_tracer) 5510 return NULL; 5511 5512 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer); 5513 5514 WARN_ONCE(!tr->percpu_dir, 5515 "Could not create debugfs directory 'per_cpu/%d'\n", cpu); 5516 5517 return tr->percpu_dir; 5518 } 5519 5520 static struct dentry * 5521 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 5522 void *data, long cpu, const struct file_operations *fops) 5523 { 5524 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 5525 5526 if (ret) /* See tracing_get_cpu() */ 5527 ret->d_inode->i_cdev = (void *)(cpu + 1); 5528 return ret; 5529 } 5530 5531 static void 5532 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) 5533 { 5534 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 5535 struct dentry *d_cpu; 5536 char cpu_dir[30]; /* 30 characters should be more than enough */ 5537 5538 if (!d_percpu) 5539 return; 5540 5541 snprintf(cpu_dir, 30, "cpu%ld", cpu); 5542 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 5543 if (!d_cpu) { 5544 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); 5545 return; 5546 } 5547 5548 /* per cpu trace_pipe */ 5549 trace_create_cpu_file("trace_pipe", 0444, d_cpu, 5550 tr, cpu, &tracing_pipe_fops); 5551 5552 /* per cpu trace */ 5553 trace_create_cpu_file("trace", 0644, d_cpu, 5554 tr, cpu, &tracing_fops); 5555 5556 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, 5557 tr, cpu, &tracing_buffers_fops); 5558 5559 trace_create_cpu_file("stats", 0444, d_cpu, 5560 tr, cpu, &tracing_stats_fops); 5561 5562 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, 5563 tr, cpu, &tracing_entries_fops); 5564 5565 #ifdef CONFIG_TRACER_SNAPSHOT 5566 trace_create_cpu_file("snapshot", 0644, d_cpu, 5567 tr, cpu, &snapshot_fops); 5568 5569 trace_create_cpu_file("snapshot_raw", 0444, d_cpu, 5570 tr, cpu, &snapshot_raw_fops); 5571 #endif 5572 } 5573 5574 #ifdef CONFIG_FTRACE_SELFTEST 5575 /* Let selftest have access to static functions in this file */ 5576 #include "trace_selftest.c" 5577 #endif 5578 5579 struct trace_option_dentry { 5580 struct tracer_opt *opt; 5581 struct tracer_flags *flags; 5582 struct trace_array *tr; 5583 struct dentry *entry; 5584 }; 5585 5586 static ssize_t 5587 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 5588 loff_t *ppos) 5589 { 5590 struct 
trace_option_dentry *topt = filp->private_data; 5591 char *buf; 5592 5593 if (topt->flags->val & topt->opt->bit) 5594 buf = "1\n"; 5595 else 5596 buf = "0\n"; 5597 5598 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5599 } 5600 5601 static ssize_t 5602 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 5603 loff_t *ppos) 5604 { 5605 struct trace_option_dentry *topt = filp->private_data; 5606 unsigned long val; 5607 int ret; 5608 5609 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5610 if (ret) 5611 return ret; 5612 5613 if (val != 0 && val != 1) 5614 return -EINVAL; 5615 5616 if (!!(topt->flags->val & topt->opt->bit) != val) { 5617 mutex_lock(&trace_types_lock); 5618 ret = __set_tracer_option(topt->tr->current_trace, topt->flags, 5619 topt->opt, !val); 5620 mutex_unlock(&trace_types_lock); 5621 if (ret) 5622 return ret; 5623 } 5624 5625 *ppos += cnt; 5626 5627 return cnt; 5628 } 5629 5630 5631 static const struct file_operations trace_options_fops = { 5632 .open = tracing_open_generic, 5633 .read = trace_options_read, 5634 .write = trace_options_write, 5635 .llseek = generic_file_llseek, 5636 }; 5637 5638 static ssize_t 5639 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 5640 loff_t *ppos) 5641 { 5642 long index = (long)filp->private_data; 5643 char *buf; 5644 5645 if (trace_flags & (1 << index)) 5646 buf = "1\n"; 5647 else 5648 buf = "0\n"; 5649 5650 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5651 } 5652 5653 static ssize_t 5654 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 5655 loff_t *ppos) 5656 { 5657 struct trace_array *tr = &global_trace; 5658 long index = (long)filp->private_data; 5659 unsigned long val; 5660 int ret; 5661 5662 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5663 if (ret) 5664 return ret; 5665 5666 if (val != 0 && val != 1) 5667 return -EINVAL; 5668 5669 mutex_lock(&trace_types_lock); 5670 ret = set_tracer_flag(tr, 1 << index, val); 5671 mutex_unlock(&trace_types_lock); 5672 5673 if (ret < 0) 5674 return ret; 5675 5676 *ppos += cnt; 5677 5678 return cnt; 5679 } 5680 5681 static const struct file_operations trace_options_core_fops = { 5682 .open = tracing_open_generic, 5683 .read = trace_options_core_read, 5684 .write = trace_options_core_write, 5685 .llseek = generic_file_llseek, 5686 }; 5687 5688 struct dentry *trace_create_file(const char *name, 5689 umode_t mode, 5690 struct dentry *parent, 5691 void *data, 5692 const struct file_operations *fops) 5693 { 5694 struct dentry *ret; 5695 5696 ret = debugfs_create_file(name, mode, parent, data, fops); 5697 if (!ret) 5698 pr_warning("Could not create debugfs '%s' entry\n", name); 5699 5700 return ret; 5701 } 5702 5703 5704 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 5705 { 5706 struct dentry *d_tracer; 5707 5708 if (tr->options) 5709 return tr->options; 5710 5711 d_tracer = tracing_init_dentry_tr(tr); 5712 if (!d_tracer) 5713 return NULL; 5714 5715 tr->options = debugfs_create_dir("options", d_tracer); 5716 if (!tr->options) { 5717 pr_warning("Could not create debugfs directory 'options'\n"); 5718 return NULL; 5719 } 5720 5721 return tr->options; 5722 } 5723 5724 static void 5725 create_trace_option_file(struct trace_array *tr, 5726 struct trace_option_dentry *topt, 5727 struct tracer_flags *flags, 5728 struct tracer_opt *opt) 5729 { 5730 struct dentry *t_options; 5731 5732 t_options = trace_options_init_dentry(tr); 5733 if (!t_options) 5734 return; 5735 5736 topt->flags = flags; 
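	/*
	 * The topt fields filled in here and below back a per-tracer file at
	 * options/<opt->name>; writing "0" or "1" to it goes through
	 * trace_options_write() above, e.g. (sketch, the option name depends
	 * on the current tracer):
	 *
	 *	echo 1 > /sys/kernel/debug/tracing/options/<option-name>
	 */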
5737 topt->opt = opt; 5738 topt->tr = tr; 5739 5740 topt->entry = trace_create_file(opt->name, 0644, t_options, topt, 5741 &trace_options_fops); 5742 5743 } 5744 5745 static struct trace_option_dentry * 5746 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 5747 { 5748 struct trace_option_dentry *topts; 5749 struct tracer_flags *flags; 5750 struct tracer_opt *opts; 5751 int cnt; 5752 5753 if (!tracer) 5754 return NULL; 5755 5756 flags = tracer->flags; 5757 5758 if (!flags || !flags->opts) 5759 return NULL; 5760 5761 opts = flags->opts; 5762 5763 for (cnt = 0; opts[cnt].name; cnt++) 5764 ; 5765 5766 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 5767 if (!topts) 5768 return NULL; 5769 5770 for (cnt = 0; opts[cnt].name; cnt++) 5771 create_trace_option_file(tr, &topts[cnt], flags, 5772 &opts[cnt]); 5773 5774 return topts; 5775 } 5776 5777 static void 5778 destroy_trace_option_files(struct trace_option_dentry *topts) 5779 { 5780 int cnt; 5781 5782 if (!topts) 5783 return; 5784 5785 for (cnt = 0; topts[cnt].opt; cnt++) { 5786 if (topts[cnt].entry) 5787 debugfs_remove(topts[cnt].entry); 5788 } 5789 5790 kfree(topts); 5791 } 5792 5793 static struct dentry * 5794 create_trace_option_core_file(struct trace_array *tr, 5795 const char *option, long index) 5796 { 5797 struct dentry *t_options; 5798 5799 t_options = trace_options_init_dentry(tr); 5800 if (!t_options) 5801 return NULL; 5802 5803 return trace_create_file(option, 0644, t_options, (void *)index, 5804 &trace_options_core_fops); 5805 } 5806 5807 static __init void create_trace_options_dir(struct trace_array *tr) 5808 { 5809 struct dentry *t_options; 5810 int i; 5811 5812 t_options = trace_options_init_dentry(tr); 5813 if (!t_options) 5814 return; 5815 5816 for (i = 0; trace_options[i]; i++) 5817 create_trace_option_core_file(tr, trace_options[i], i); 5818 } 5819 5820 static ssize_t 5821 rb_simple_read(struct file *filp, char __user *ubuf, 5822 size_t cnt, loff_t *ppos) 5823 { 5824 struct trace_array *tr = filp->private_data; 5825 char buf[64]; 5826 int r; 5827 5828 r = tracer_tracing_is_on(tr); 5829 r = sprintf(buf, "%d\n", r); 5830 5831 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5832 } 5833 5834 static ssize_t 5835 rb_simple_write(struct file *filp, const char __user *ubuf, 5836 size_t cnt, loff_t *ppos) 5837 { 5838 struct trace_array *tr = filp->private_data; 5839 struct ring_buffer *buffer = tr->trace_buffer.buffer; 5840 unsigned long val; 5841 int ret; 5842 5843 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5844 if (ret) 5845 return ret; 5846 5847 if (buffer) { 5848 mutex_lock(&trace_types_lock); 5849 if (val) { 5850 tracer_tracing_on(tr); 5851 if (tr->current_trace->start) 5852 tr->current_trace->start(tr); 5853 } else { 5854 tracer_tracing_off(tr); 5855 if (tr->current_trace->stop) 5856 tr->current_trace->stop(tr); 5857 } 5858 mutex_unlock(&trace_types_lock); 5859 } 5860 5861 (*ppos)++; 5862 5863 return cnt; 5864 } 5865 5866 static const struct file_operations rb_simple_fops = { 5867 .open = tracing_open_generic_tr, 5868 .read = rb_simple_read, 5869 .write = rb_simple_write, 5870 .release = tracing_release_generic_tr, 5871 .llseek = default_llseek, 5872 }; 5873 5874 struct dentry *trace_instance_dir; 5875 5876 static void 5877 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); 5878 5879 static int 5880 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) 5881 { 5882 enum ring_buffer_flags rb_flags; 5883 5884 rb_flags = trace_flags & 
TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 5885 5886 buf->buffer = ring_buffer_alloc(size, rb_flags); 5887 if (!buf->buffer) 5888 return -ENOMEM; 5889 5890 buf->data = alloc_percpu(struct trace_array_cpu); 5891 if (!buf->data) { 5892 ring_buffer_free(buf->buffer); 5893 return -ENOMEM; 5894 } 5895 5896 /* Allocate the first page for all buffers */ 5897 set_buffer_entries(&tr->trace_buffer, 5898 ring_buffer_size(tr->trace_buffer.buffer, 0)); 5899 5900 return 0; 5901 } 5902 5903 static int allocate_trace_buffers(struct trace_array *tr, int size) 5904 { 5905 int ret; 5906 5907 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); 5908 if (ret) 5909 return ret; 5910 5911 #ifdef CONFIG_TRACER_MAX_TRACE 5912 ret = allocate_trace_buffer(tr, &tr->max_buffer, 5913 allocate_snapshot ? size : 1); 5914 if (WARN_ON(ret)) { 5915 ring_buffer_free(tr->trace_buffer.buffer); 5916 free_percpu(tr->trace_buffer.data); 5917 return -ENOMEM; 5918 } 5919 tr->allocated_snapshot = allocate_snapshot; 5920 5921 /* 5922 * Only the top level trace array gets its snapshot allocated 5923 * from the kernel command line. 5924 */ 5925 allocate_snapshot = false; 5926 #endif 5927 return 0; 5928 } 5929 5930 static int new_instance_create(const char *name) 5931 { 5932 struct trace_array *tr; 5933 int ret; 5934 5935 mutex_lock(&trace_types_lock); 5936 5937 ret = -EEXIST; 5938 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5939 if (tr->name && strcmp(tr->name, name) == 0) 5940 goto out_unlock; 5941 } 5942 5943 ret = -ENOMEM; 5944 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 5945 if (!tr) 5946 goto out_unlock; 5947 5948 tr->name = kstrdup(name, GFP_KERNEL); 5949 if (!tr->name) 5950 goto out_free_tr; 5951 5952 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 5953 goto out_free_tr; 5954 5955 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 5956 5957 raw_spin_lock_init(&tr->start_lock); 5958 5959 tr->current_trace = &nop_trace; 5960 5961 INIT_LIST_HEAD(&tr->systems); 5962 INIT_LIST_HEAD(&tr->events); 5963 5964 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 5965 goto out_free_tr; 5966 5967 tr->dir = debugfs_create_dir(name, trace_instance_dir); 5968 if (!tr->dir) 5969 goto out_free_tr; 5970 5971 ret = event_trace_add_tracer(tr->dir, tr); 5972 if (ret) { 5973 debugfs_remove_recursive(tr->dir); 5974 goto out_free_tr; 5975 } 5976 5977 init_tracer_debugfs(tr, tr->dir); 5978 5979 list_add(&tr->list, &ftrace_trace_arrays); 5980 5981 mutex_unlock(&trace_types_lock); 5982 5983 return 0; 5984 5985 out_free_tr: 5986 if (tr->trace_buffer.buffer) 5987 ring_buffer_free(tr->trace_buffer.buffer); 5988 free_cpumask_var(tr->tracing_cpumask); 5989 kfree(tr->name); 5990 kfree(tr); 5991 5992 out_unlock: 5993 mutex_unlock(&trace_types_lock); 5994 5995 return ret; 5996 5997 } 5998 5999 static int instance_delete(const char *name) 6000 { 6001 struct trace_array *tr; 6002 int found = 0; 6003 int ret; 6004 6005 mutex_lock(&trace_types_lock); 6006 6007 ret = -ENODEV; 6008 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6009 if (tr->name && strcmp(tr->name, name) == 0) { 6010 found = 1; 6011 break; 6012 } 6013 } 6014 if (!found) 6015 goto out_unlock; 6016 6017 ret = -EBUSY; 6018 if (tr->ref) 6019 goto out_unlock; 6020 6021 list_del(&tr->list); 6022 6023 event_trace_del_tracer(tr); 6024 debugfs_remove_recursive(tr->dir); 6025 free_percpu(tr->trace_buffer.data); 6026 ring_buffer_free(tr->trace_buffer.buffer); 6027 6028 kfree(tr->name); 6029 kfree(tr); 6030 6031 ret = 0; 6032 6033 out_unlock: 6034 mutex_unlock(&trace_types_lock); 6035 6036 
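	/*
	 * For reference, new_instance_create() and instance_delete() are what
	 * a user drives from a shell (sketch); rmdir fails with -EBUSY while
	 * the instance is still referenced (tr->ref above):
	 *
	 *	mkdir /sys/kernel/debug/tracing/instances/foo
	 *	echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
	 *	rmdir /sys/kernel/debug/tracing/instances/foo
	 */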
return ret; 6037 } 6038 6039 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode) 6040 { 6041 struct dentry *parent; 6042 int ret; 6043 6044 /* Paranoid: Make sure the parent is the "instances" directory */ 6045 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6046 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6047 return -ENOENT; 6048 6049 /* 6050 * The inode mutex is locked, but debugfs_create_dir() will also 6051 * take the mutex. As the instances directory can not be destroyed 6052 * or changed in any other way, it is safe to unlock it, and 6053 * let the dentry try. If two users try to make the same dir at 6054 * the same time, then the new_instance_create() will determine the 6055 * winner. 6056 */ 6057 mutex_unlock(&inode->i_mutex); 6058 6059 ret = new_instance_create(dentry->d_iname); 6060 6061 mutex_lock(&inode->i_mutex); 6062 6063 return ret; 6064 } 6065 6066 static int instance_rmdir(struct inode *inode, struct dentry *dentry) 6067 { 6068 struct dentry *parent; 6069 int ret; 6070 6071 /* Paranoid: Make sure the parent is the "instances" directory */ 6072 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6073 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6074 return -ENOENT; 6075 6076 /* The caller did a dget() on dentry */ 6077 mutex_unlock(&dentry->d_inode->i_mutex); 6078 6079 /* 6080 * The inode mutex is locked, but debugfs_create_dir() will also 6081 * take the mutex. As the instances directory can not be destroyed 6082 * or changed in any other way, it is safe to unlock it, and 6083 * let the dentry try. If two users try to make the same dir at 6084 * the same time, then the instance_delete() will determine the 6085 * winner. 6086 */ 6087 mutex_unlock(&inode->i_mutex); 6088 6089 ret = instance_delete(dentry->d_iname); 6090 6091 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); 6092 mutex_lock(&dentry->d_inode->i_mutex); 6093 6094 return ret; 6095 } 6096 6097 static const struct inode_operations instance_dir_inode_operations = { 6098 .lookup = simple_lookup, 6099 .mkdir = instance_mkdir, 6100 .rmdir = instance_rmdir, 6101 }; 6102 6103 static __init void create_trace_instances(struct dentry *d_tracer) 6104 { 6105 trace_instance_dir = debugfs_create_dir("instances", d_tracer); 6106 if (WARN_ON(!trace_instance_dir)) 6107 return; 6108 6109 /* Hijack the dir inode operations, to allow mkdir */ 6110 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations; 6111 } 6112 6113 static void 6114 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) 6115 { 6116 int cpu; 6117 6118 trace_create_file("tracing_cpumask", 0644, d_tracer, 6119 tr, &tracing_cpumask_fops); 6120 6121 trace_create_file("trace_options", 0644, d_tracer, 6122 tr, &tracing_iter_fops); 6123 6124 trace_create_file("trace", 0644, d_tracer, 6125 tr, &tracing_fops); 6126 6127 trace_create_file("trace_pipe", 0444, d_tracer, 6128 tr, &tracing_pipe_fops); 6129 6130 trace_create_file("buffer_size_kb", 0644, d_tracer, 6131 tr, &tracing_entries_fops); 6132 6133 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 6134 tr, &tracing_total_entries_fops); 6135 6136 trace_create_file("free_buffer", 0200, d_tracer, 6137 tr, &tracing_free_buffer_fops); 6138 6139 trace_create_file("trace_marker", 0220, d_tracer, 6140 tr, &tracing_mark_fops); 6141 6142 trace_create_file("trace_clock", 0644, d_tracer, tr, 6143 &trace_clock_fops); 6144 6145 trace_create_file("tracing_on", 0644, d_tracer, 6146 tr, &rb_simple_fops); 6147 6148 #ifdef 
CONFIG_TRACER_SNAPSHOT 6149 trace_create_file("snapshot", 0644, d_tracer, 6150 tr, &snapshot_fops); 6151 #endif 6152 6153 for_each_tracing_cpu(cpu) 6154 tracing_init_debugfs_percpu(tr, cpu); 6155 6156 } 6157 6158 static __init int tracer_init_debugfs(void) 6159 { 6160 struct dentry *d_tracer; 6161 6162 trace_access_lock_init(); 6163 6164 d_tracer = tracing_init_dentry(); 6165 if (!d_tracer) 6166 return 0; 6167 6168 init_tracer_debugfs(&global_trace, d_tracer); 6169 6170 trace_create_file("available_tracers", 0444, d_tracer, 6171 &global_trace, &show_traces_fops); 6172 6173 trace_create_file("current_tracer", 0644, d_tracer, 6174 &global_trace, &set_tracer_fops); 6175 6176 #ifdef CONFIG_TRACER_MAX_TRACE 6177 trace_create_file("tracing_max_latency", 0644, d_tracer, 6178 &tracing_max_latency, &tracing_max_lat_fops); 6179 #endif 6180 6181 trace_create_file("tracing_thresh", 0644, d_tracer, 6182 &tracing_thresh, &tracing_max_lat_fops); 6183 6184 trace_create_file("README", 0444, d_tracer, 6185 NULL, &tracing_readme_fops); 6186 6187 trace_create_file("saved_cmdlines", 0444, d_tracer, 6188 NULL, &tracing_saved_cmdlines_fops); 6189 6190 #ifdef CONFIG_DYNAMIC_FTRACE 6191 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 6192 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 6193 #endif 6194 6195 create_trace_instances(d_tracer); 6196 6197 create_trace_options_dir(&global_trace); 6198 6199 return 0; 6200 } 6201 6202 static int trace_panic_handler(struct notifier_block *this, 6203 unsigned long event, void *unused) 6204 { 6205 if (ftrace_dump_on_oops) 6206 ftrace_dump(ftrace_dump_on_oops); 6207 return NOTIFY_OK; 6208 } 6209 6210 static struct notifier_block trace_panic_notifier = { 6211 .notifier_call = trace_panic_handler, 6212 .next = NULL, 6213 .priority = 150 /* priority: INT_MAX >= x >= 0 */ 6214 }; 6215 6216 static int trace_die_handler(struct notifier_block *self, 6217 unsigned long val, 6218 void *data) 6219 { 6220 switch (val) { 6221 case DIE_OOPS: 6222 if (ftrace_dump_on_oops) 6223 ftrace_dump(ftrace_dump_on_oops); 6224 break; 6225 default: 6226 break; 6227 } 6228 return NOTIFY_OK; 6229 } 6230 6231 static struct notifier_block trace_die_notifier = { 6232 .notifier_call = trace_die_handler, 6233 .priority = 200 6234 }; 6235 6236 /* 6237 * printk is set to max of 1024, we really don't need it that big. 6238 * Nothing should be printing 1000 characters anyway. 6239 */ 6240 #define TRACE_MAX_PRINT 1000 6241 6242 /* 6243 * Define here KERN_TRACE so that we have one place to modify 6244 * it if we decide to change what log level the ftrace dump 6245 * should be at. 6246 */ 6247 #define KERN_TRACE KERN_EMERG 6248 6249 void 6250 trace_printk_seq(struct trace_seq *s) 6251 { 6252 /* Probably should print a warning here. */ 6253 if (s->len >= TRACE_MAX_PRINT) 6254 s->len = TRACE_MAX_PRINT; 6255 6256 /* should be zero ended, but we are paranoid. 
*/ 6257 s->buffer[s->len] = 0; 6258 6259 printk(KERN_TRACE "%s", s->buffer); 6260 6261 trace_seq_init(s); 6262 } 6263 6264 void trace_init_global_iter(struct trace_iterator *iter) 6265 { 6266 iter->tr = &global_trace; 6267 iter->trace = iter->tr->current_trace; 6268 iter->cpu_file = RING_BUFFER_ALL_CPUS; 6269 iter->trace_buffer = &global_trace.trace_buffer; 6270 6271 if (iter->trace && iter->trace->open) 6272 iter->trace->open(iter); 6273 6274 /* Annotate start of buffers if we had overruns */ 6275 if (ring_buffer_overruns(iter->trace_buffer->buffer)) 6276 iter->iter_flags |= TRACE_FILE_ANNOTATE; 6277 6278 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6279 if (trace_clocks[iter->tr->clock_id].in_ns) 6280 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6281 } 6282 6283 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) 6284 { 6285 /* use static because iter can be a bit big for the stack */ 6286 static struct trace_iterator iter; 6287 static atomic_t dump_running; 6288 unsigned int old_userobj; 6289 unsigned long flags; 6290 int cnt = 0, cpu; 6291 6292 /* Only allow one dump user at a time. */ 6293 if (atomic_inc_return(&dump_running) != 1) { 6294 atomic_dec(&dump_running); 6295 return; 6296 } 6297 6298 /* 6299 * Always turn off tracing when we dump. 6300 * We don't need to show trace output of what happens 6301 * between multiple crashes. 6302 * 6303 * If the user does a sysrq-z, then they can re-enable 6304 * tracing with echo 1 > tracing_on. 6305 */ 6306 tracing_off(); 6307 6308 local_irq_save(flags); 6309 6310 /* Simulate the iterator */ 6311 trace_init_global_iter(&iter); 6312 6313 for_each_tracing_cpu(cpu) { 6314 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled); 6315 } 6316 6317 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; 6318 6319 /* don't look at user memory in panic mode */ 6320 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 6321 6322 switch (oops_dump_mode) { 6323 case DUMP_ALL: 6324 iter.cpu_file = RING_BUFFER_ALL_CPUS; 6325 break; 6326 case DUMP_ORIG: 6327 iter.cpu_file = raw_smp_processor_id(); 6328 break; 6329 case DUMP_NONE: 6330 goto out_enable; 6331 default: 6332 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); 6333 iter.cpu_file = RING_BUFFER_ALL_CPUS; 6334 } 6335 6336 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 6337 6338 /* Did function tracer already get disabled? */ 6339 if (ftrace_is_dead()) { 6340 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 6341 printk("# MAY BE MISSING FUNCTION EVENTS\n"); 6342 } 6343 6344 /* 6345 * We need to stop all tracing on all CPUs to read the 6346 * next buffer. This is a bit expensive, but is 6347 * not done often. We fill in all that we can read, 6348 * and then release the locks again.
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        static atomic_t dump_running;
        unsigned int old_userobj;
        unsigned long flags;
        int cnt = 0, cpu;

        /* Only allow one dump user at a time. */
        if (atomic_inc_return(&dump_running) != 1) {
                atomic_dec(&dump_running);
                return;
        }

        /*
         * Always turn off tracing when we dump.
         * We don't need to show trace output of what happens
         * between multiple crashes.
         *
         * If the user does a sysrq-z, then they can re-enable
         * tracing with echo 1 > tracing_on.
         */
        tracing_off();

        local_irq_save(flags);

        /* Simulate the iterator */
        trace_init_global_iter(&iter);

        for_each_tracing_cpu(cpu) {
                atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
        }

        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

        /* don't look at user memory in panic mode */
        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
                break;
        case DUMP_NONE:
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
                iter.cpu_file = RING_BUFFER_ALL_CPUS;
        }

        printk(KERN_TRACE "Dumping ftrace buffer:\n");

        /* Did function tracer already get disabled? */
        if (ftrace_is_dead()) {
                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
        }

        /*
         * We need to stop tracing on all CPUs to read the next buffer
         * entry.  This is a bit expensive, but it is not done often.
         * We print everything we can read, then release the locks again.
         */

        while (!trace_empty(&iter)) {

                if (!cnt)
                        printk(KERN_TRACE "---------------------------------\n");

                cnt++;

                /* reset all but tr, trace, and overruns */
                memset(&iter.seq, 0,
                       sizeof(struct trace_iterator) -
                       offsetof(struct trace_iterator, seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;

                        ret = print_trace_line(&iter);
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
                touch_nmi_watchdog();

                trace_printk_seq(&iter.seq);
        }

        if (!cnt)
                printk(KERN_TRACE "   (ftrace buffer empty)\n");
        else
                printk(KERN_TRACE "---------------------------------\n");

 out_enable:
        trace_flags |= old_userobj;

        for_each_tracing_cpu(cpu) {
                atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
        }
        atomic_dec(&dump_running);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
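/*
 * Example (illustrative sketch, compiled out): tracer_alloc_buffers()
 * below unwinds partial allocations with the usual goto-label pattern.
 * The same idiom in a self-contained form is shown here; my_mask,
 * my_buf and my_setup() are hypothetical names, not part of this file.
 */
#if 0
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/init.h>

static cpumask_var_t my_mask;
static void *my_buf;

static int __init my_setup(void)
{
        int ret = -ENOMEM;

        /* Each allocation jumps to a label that frees what came before. */
        if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
                goto out;

        my_buf = kzalloc(1024, GFP_KERNEL);
        if (!my_buf)
                goto out_free_mask;

        cpumask_copy(my_mask, cpu_possible_mask);
        return 0;

 out_free_mask:
        free_cpumask_var(my_mask);
 out:
        return ret;
}
#endif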
__init static int tracer_alloc_buffers(void)
{
        int ring_buf_size;
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
                trace_printk_init_buffers();

        /* To save memory, keep the ring buffer size to its minimum */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
        else
                ring_buf_size = 1;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

        raw_spin_lock_init(&global_trace.start_lock);

        /* TODO: make the number of buffers hot pluggable with CPUs */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
                goto out_free_cpumask;
        }

        if (global_trace.buffer_disabled)
                tracing_off();

        trace_init_cmdlines();

        /*
         * register_tracer() might reference current_trace, so it
         * needs to be set before we register anything. This is
         * just a bootstrap of current_trace anyway.
         */
        global_trace.current_trace = &nop_trace;

        register_tracer(&nop_trace);

        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &trace_panic_notifier);

        register_die_notifier(&trace_die_notifier);

        global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

        INIT_LIST_HEAD(&global_trace.systems);
        INIT_LIST_HEAD(&global_trace.events);
        list_add(&global_trace.list, &ftrace_trace_arrays);

        while (trace_boot_options) {
                char *option;

                option = strsep(&trace_boot_options, ",");
                trace_set_options(&global_trace, option);
        }

        register_snapshot_cmd();

        return 0;

out_free_cpumask:
        free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
        free_percpu(global_trace.max_buffer.data);
#endif
        free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}

__init static int clear_boot_tracer(void)
{
        /*
         * The buffer holding the default bootup tracer name lives in an
         * __initdata section and is freed after boot.  This late initcall
         * runs after all built-in tracers have had a chance to register;
         * if the boot tracer never registered, clear the pointer so a
         * later registration does not access a buffer that is about to
         * be freed.
         */
        if (!default_bootup_tracer)
                return 0;

        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
               default_bootup_tracer);

        default_bootup_tracer = NULL;

        return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
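/*
 * Example (illustrative sketch, compiled out): the three initcalls above
 * rely on initcall ordering: early_initcall() allocates the buffers,
 * fs_initcall() creates the debugfs files once filesystems are usable,
 * and late_initcall() runs after every built-in tracer has registered.
 * The same staging for a hypothetical facility is sketched below;
 * my_alloc(), my_create_files() and my_late_check() are placeholder
 * names only.
 */
#if 0
#include <linux/init.h>
#include <linux/printk.h>

static int __init my_alloc(void)
{
        pr_info("step 1: allocate core state\n");
        return 0;
}
early_initcall(my_alloc);

static int __init my_create_files(void)
{
        pr_info("step 2: expose files once filesystems exist\n");
        return 0;
}
fs_initcall(my_create_files);

static int __init my_late_check(void)
{
        pr_info("step 3: final check after other initcalls have run\n");
        return 0;
}
late_initcall(my_late_check);
#endif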