/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions
 * into the ring buffer (such as trace_printk) could occur at the
 * same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 *   Set to 1 if you want to dump the buffers of all CPUs.
 *   Set to 2 if you want to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

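/*
 * Example (illustrative, not part of the build): the boot parameters
 * registered above can be combined on the kernel command line, e.g.:
 *
 *	ftrace=function trace_options=sym-addr,stacktrace
 *	ftrace_dump_on_oops=orig_cpu alloc_snapshot
 *
 * The option names accepted by trace_options= are the strings listed
 * later in trace_options[].
 */
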
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different per-cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

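/*
 * Example (sketch, not part of this file): tracing_on() and tracing_off()
 * (the latter is defined further down) are exported so that kernel code
 * being debugged can bracket a region of interest and keep the ring
 * buffer from being overwritten by later events:
 *
 *	tracing_on();
 *	do_something_suspicious();	// hypothetical code under test
 *	tracing_off();
 *
 * The same switch is available at run time via
 * /sys/kernel/debug/tracing/tracing_on.
 */
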
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, preempt_count());
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

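/*
 * Example (sketch): these two helpers normally sit behind the trace_puts()
 * macro in <linux/kernel.h>, which picks __trace_bputs() for string
 * literals (only the pointer is recorded) and falls back to __trace_puts()
 * for dynamic strings:
 *
 *	trace_puts("reached the slow path\n");
 *
 * Like trace_printk(), this is a debugging aid, not something to leave
 * in production code.
 */
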
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	if (WARN_ON(ret < 0))
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

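/*
 * Example (sketch, not part of this file): a driver chasing a rare
 * condition can freeze the interesting part of the trace without
 * stopping tracing altogether.  "odd_condition" is hypothetical.
 *
 *	// during setup, where sleeping is allowed:
 *	tracing_snapshot_alloc();
 *
 *	// later, in the hot path, when the condition of interest hits:
 *	if (odd_condition(dev))
 *		tracing_snapshot();
 *
 * The captured data can then be read back from
 * /sys/kernel/debug/tracing/snapshot.
 */
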
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local, "local", 1 },
	{ trace_clock_global, "global", 1 },
	{ trace_clock_counter, "counter", 0 },
	{ trace_clock_jiffies, "uptime", 1 },
	{ trace_clock, "perf", 1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser hasn't finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

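/*
 * Example (illustrative sketch): this parser is what the various
 * "set_ftrace_filter"-style debugfs write handlers are built on.  A
 * minimal user looks roughly like this ("my_handle_token" is hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		my_handle_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;
 */
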
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

out_unlock:
	return ret;
}

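/*
 * Example (sketch): a tracer plugin is a statically defined struct tracer
 * registered from its own init code.  "my_tracer" and its callbacks are
 * hypothetical; see trace_functions.c or trace_nop.c for real users.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 */
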
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

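/*
 * Example (sketch): every event write follows the same reserve/fill/commit
 * cycle built on the helpers above.  "struct my_entry" and TRACE_MY_TYPE
 * are hypothetical; trace_function() below is a real in-tree instance.
 *
 *	struct ring_buffer *buffer;
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = trace_current_buffer_lock_reserve(&buffer, TRACE_MY_TYPE,
 *						   sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;			// ring buffer full or disabled
 *	entry = ring_buffer_event_data(event);
 *	entry->value = value;
 *	trace_current_buffer_unlock_commit(buffer, event, flags, pc);
 */
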
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

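/*
 * Example (sketch): trace_dump_stack() is handy for one-off debugging;
 * dropped into a suspect code path it records the kernel stack into the
 * trace buffer instead of spamming the console like dump_stack() would:
 *
 *	if (unexpected_state)		// hypothetical condition
 *		trace_dump_stack(0);
 *
 * The recorded stack shows up inline in the trace output.
 */
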
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);

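/*
 * Example (sketch): trace_vbprintk() is the backend of the trace_printk()
 * macro from <linux/kernel.h>, which is the usual way kernel developers
 * log into the ring buffer from hot paths:
 *
 *	trace_printk("processing skb %p, len %u\n", skb, skb->len);
 *
 * When the format string is a constant, only its pointer and the binary
 * arguments are recorded (the TRACE_BPRINT path above), which keeps the
 * fast path cheap; the text is only formatted when the trace is read.
 */
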
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}

int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek at that CPU's buffer directly.
2166 */ 2167 if (cpu_file > RING_BUFFER_ALL_CPUS) { 2168 if (ring_buffer_empty_cpu(buffer, cpu_file)) 2169 return NULL; 2170 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 2171 if (ent_cpu) 2172 *ent_cpu = cpu_file; 2173 2174 return ent; 2175 } 2176 2177 for_each_tracing_cpu(cpu) { 2178 2179 if (ring_buffer_empty_cpu(buffer, cpu)) 2180 continue; 2181 2182 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 2183 2184 /* 2185 * Pick the entry with the smallest timestamp: 2186 */ 2187 if (ent && (!next || ts < next_ts)) { 2188 next = ent; 2189 next_cpu = cpu; 2190 next_ts = ts; 2191 next_lost = lost_events; 2192 next_size = iter->ent_size; 2193 } 2194 } 2195 2196 iter->ent_size = next_size; 2197 2198 if (ent_cpu) 2199 *ent_cpu = next_cpu; 2200 2201 if (ent_ts) 2202 *ent_ts = next_ts; 2203 2204 if (missing_events) 2205 *missing_events = next_lost; 2206 2207 return next; 2208 } 2209 2210 /* Find the next real entry, without updating the iterator itself */ 2211 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 2212 int *ent_cpu, u64 *ent_ts) 2213 { 2214 return __find_next_entry(iter, ent_cpu, NULL, ent_ts); 2215 } 2216 2217 /* Find the next real entry, and increment the iterator to the next entry */ 2218 void *trace_find_next_entry_inc(struct trace_iterator *iter) 2219 { 2220 iter->ent = __find_next_entry(iter, &iter->cpu, 2221 &iter->lost_events, &iter->ts); 2222 2223 if (iter->ent) 2224 trace_iterator_increment(iter); 2225 2226 return iter->ent ? iter : NULL; 2227 } 2228 2229 static void trace_consume(struct trace_iterator *iter) 2230 { 2231 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts, 2232 &iter->lost_events); 2233 } 2234 2235 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 2236 { 2237 struct trace_iterator *iter = m->private; 2238 int i = (int)*pos; 2239 void *ent; 2240 2241 WARN_ON_ONCE(iter->leftover); 2242 2243 (*pos)++; 2244 2245 /* can't go backwards */ 2246 if (iter->idx > i) 2247 return NULL; 2248 2249 if (iter->idx < 0) 2250 ent = trace_find_next_entry_inc(iter); 2251 else 2252 ent = iter; 2253 2254 while (ent && iter->idx < i) 2255 ent = trace_find_next_entry_inc(iter); 2256 2257 iter->pos = *pos; 2258 2259 return ent; 2260 } 2261 2262 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 2263 { 2264 struct ring_buffer_event *event; 2265 struct ring_buffer_iter *buf_iter; 2266 unsigned long entries = 0; 2267 u64 ts; 2268 2269 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; 2270 2271 buf_iter = trace_buffer_iter(iter, cpu); 2272 if (!buf_iter) 2273 return; 2274 2275 ring_buffer_iter_reset(buf_iter); 2276 2277 /* 2278 * We could have the case with the max latency tracers 2279 * that a reset never took place on a cpu. This is evident 2280 * by the timestamp being before the start of the buffer. 2281 */ 2282 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { 2283 if (ts >= iter->trace_buffer->time_start) 2284 break; 2285 entries++; 2286 ring_buffer_read(buf_iter, NULL); 2287 } 2288 2289 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; 2290 } 2291 2292 /* 2293 * The current tracer is copied to avoid a global locking 2294 * all around. 2295 */ 2296 static void *s_start(struct seq_file *m, loff_t *pos) 2297 { 2298 struct trace_iterator *iter = m->private; 2299 struct trace_array *tr = iter->tr; 2300 int cpu_file = iter->cpu_file; 2301 void *p = NULL; 2302 loff_t l = 0; 2303 int cpu; 2304 2305 /* 2306 * copy the tracer to avoid using a global lock all around. 
2307 * iter->trace is a copy of current_trace, the pointer to the 2308 * name may be used instead of a strcmp(), as iter->trace->name 2309 * will point to the same string as current_trace->name. 2310 */ 2311 mutex_lock(&trace_types_lock); 2312 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) 2313 *iter->trace = *tr->current_trace; 2314 mutex_unlock(&trace_types_lock); 2315 2316 #ifdef CONFIG_TRACER_MAX_TRACE 2317 if (iter->snapshot && iter->trace->use_max_tr) 2318 return ERR_PTR(-EBUSY); 2319 #endif 2320 2321 if (!iter->snapshot) 2322 atomic_inc(&trace_record_cmdline_disabled); 2323 2324 if (*pos != iter->pos) { 2325 iter->ent = NULL; 2326 iter->cpu = 0; 2327 iter->idx = -1; 2328 2329 if (cpu_file == RING_BUFFER_ALL_CPUS) { 2330 for_each_tracing_cpu(cpu) 2331 tracing_iter_reset(iter, cpu); 2332 } else 2333 tracing_iter_reset(iter, cpu_file); 2334 2335 iter->leftover = 0; 2336 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 2337 ; 2338 2339 } else { 2340 /* 2341 * If we overflowed the seq_file before, then we want 2342 * to just reuse the trace_seq buffer again. 2343 */ 2344 if (iter->leftover) 2345 p = iter; 2346 else { 2347 l = *pos - 1; 2348 p = s_next(m, p, &l); 2349 } 2350 } 2351 2352 trace_event_read_lock(); 2353 trace_access_lock(cpu_file); 2354 return p; 2355 } 2356 2357 static void s_stop(struct seq_file *m, void *p) 2358 { 2359 struct trace_iterator *iter = m->private; 2360 2361 #ifdef CONFIG_TRACER_MAX_TRACE 2362 if (iter->snapshot && iter->trace->use_max_tr) 2363 return; 2364 #endif 2365 2366 if (!iter->snapshot) 2367 atomic_dec(&trace_record_cmdline_disabled); 2368 2369 trace_access_unlock(iter->cpu_file); 2370 trace_event_read_unlock(); 2371 } 2372 2373 static void 2374 get_total_entries(struct trace_buffer *buf, 2375 unsigned long *total, unsigned long *entries) 2376 { 2377 unsigned long count; 2378 int cpu; 2379 2380 *total = 0; 2381 *entries = 0; 2382 2383 for_each_tracing_cpu(cpu) { 2384 count = ring_buffer_entries_cpu(buf->buffer, cpu); 2385 /* 2386 * If this buffer has skipped entries, then we hold all 2387 * entries for the trace and we need to ignore the 2388 * ones before the time stamp. 
2389 */ 2390 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 2391 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 2392 /* total is the same as the entries */ 2393 *total += count; 2394 } else 2395 *total += count + 2396 ring_buffer_overrun_cpu(buf->buffer, cpu); 2397 *entries += count; 2398 } 2399 } 2400 2401 static void print_lat_help_header(struct seq_file *m) 2402 { 2403 seq_puts(m, "# _------=> CPU# \n"); 2404 seq_puts(m, "# / _-----=> irqs-off \n"); 2405 seq_puts(m, "# | / _----=> need-resched \n"); 2406 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 2407 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 2408 seq_puts(m, "# |||| / delay \n"); 2409 seq_puts(m, "# cmd pid ||||| time | caller \n"); 2410 seq_puts(m, "# \\ / ||||| \\ | / \n"); 2411 } 2412 2413 static void print_event_info(struct trace_buffer *buf, struct seq_file *m) 2414 { 2415 unsigned long total; 2416 unsigned long entries; 2417 2418 get_total_entries(buf, &total, &entries); 2419 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 2420 entries, total, num_online_cpus()); 2421 seq_puts(m, "#\n"); 2422 } 2423 2424 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) 2425 { 2426 print_event_info(buf, m); 2427 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 2428 seq_puts(m, "# | | | | |\n"); 2429 } 2430 2431 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) 2432 { 2433 print_event_info(buf, m); 2434 seq_puts(m, "# _-----=> irqs-off\n"); 2435 seq_puts(m, "# / _----=> need-resched\n"); 2436 seq_puts(m, "# | / _---=> hardirq/softirq\n"); 2437 seq_puts(m, "# || / _--=> preempt-depth\n"); 2438 seq_puts(m, "# ||| / delay\n"); 2439 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); 2440 seq_puts(m, "# | | | |||| | |\n"); 2441 } 2442 2443 void 2444 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 2445 { 2446 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2447 struct trace_buffer *buf = iter->trace_buffer; 2448 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 2449 struct tracer *type = iter->trace; 2450 unsigned long entries; 2451 unsigned long total; 2452 const char *name = "preemption"; 2453 2454 name = type->name; 2455 2456 get_total_entries(buf, &total, &entries); 2457 2458 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 2459 name, UTS_RELEASE); 2460 seq_puts(m, "# -----------------------------------" 2461 "---------------------------------\n"); 2462 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 2463 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 2464 nsecs_to_usecs(data->saved_latency), 2465 entries, 2466 total, 2467 buf->cpu, 2468 #if defined(CONFIG_PREEMPT_NONE) 2469 "server", 2470 #elif defined(CONFIG_PREEMPT_VOLUNTARY) 2471 "desktop", 2472 #elif defined(CONFIG_PREEMPT) 2473 "preempt", 2474 #else 2475 "unknown", 2476 #endif 2477 /* These are reserved for later use */ 2478 0, 0, 0, 0); 2479 #ifdef CONFIG_SMP 2480 seq_printf(m, " #P:%d)\n", num_online_cpus()); 2481 #else 2482 seq_puts(m, ")\n"); 2483 #endif 2484 seq_puts(m, "# -----------------\n"); 2485 seq_printf(m, "# | task: %.16s-%d " 2486 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 2487 data->comm, data->pid, 2488 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 2489 data->policy, data->rt_priority); 2490 seq_puts(m, "# -----------------\n"); 2491 2492 if (data->critical_start) { 2493 seq_puts(m, "# => started at: "); 2494 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 2495 trace_print_seq(m, 
&iter->seq); 2496 seq_puts(m, "\n# => ended at: "); 2497 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 2498 trace_print_seq(m, &iter->seq); 2499 seq_puts(m, "\n#\n"); 2500 } 2501 2502 seq_puts(m, "#\n"); 2503 } 2504 2505 static void test_cpu_buff_start(struct trace_iterator *iter) 2506 { 2507 struct trace_seq *s = &iter->seq; 2508 2509 if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2510 return; 2511 2512 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2513 return; 2514 2515 if (cpumask_test_cpu(iter->cpu, iter->started)) 2516 return; 2517 2518 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) 2519 return; 2520 2521 cpumask_set_cpu(iter->cpu, iter->started); 2522 2523 /* Don't print started cpu buffer for the first entry of the trace */ 2524 if (iter->idx > 1) 2525 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 2526 iter->cpu); 2527 } 2528 2529 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2530 { 2531 struct trace_seq *s = &iter->seq; 2532 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2533 struct trace_entry *entry; 2534 struct trace_event *event; 2535 2536 entry = iter->ent; 2537 2538 test_cpu_buff_start(iter); 2539 2540 event = ftrace_find_event(entry->type); 2541 2542 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2543 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2544 if (!trace_print_lat_context(iter)) 2545 goto partial; 2546 } else { 2547 if (!trace_print_context(iter)) 2548 goto partial; 2549 } 2550 } 2551 2552 if (event) 2553 return event->funcs->trace(iter, sym_flags, event); 2554 2555 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 2556 goto partial; 2557 2558 return TRACE_TYPE_HANDLED; 2559 partial: 2560 return TRACE_TYPE_PARTIAL_LINE; 2561 } 2562 2563 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2564 { 2565 struct trace_seq *s = &iter->seq; 2566 struct trace_entry *entry; 2567 struct trace_event *event; 2568 2569 entry = iter->ent; 2570 2571 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2572 if (!trace_seq_printf(s, "%d %d %llu ", 2573 entry->pid, iter->cpu, iter->ts)) 2574 goto partial; 2575 } 2576 2577 event = ftrace_find_event(entry->type); 2578 if (event) 2579 return event->funcs->raw(iter, 0, event); 2580 2581 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 2582 goto partial; 2583 2584 return TRACE_TYPE_HANDLED; 2585 partial: 2586 return TRACE_TYPE_PARTIAL_LINE; 2587 } 2588 2589 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2590 { 2591 struct trace_seq *s = &iter->seq; 2592 unsigned char newline = '\n'; 2593 struct trace_entry *entry; 2594 struct trace_event *event; 2595 2596 entry = iter->ent; 2597 2598 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2599 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 2600 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 2601 SEQ_PUT_HEX_FIELD_RET(s, iter->ts); 2602 } 2603 2604 event = ftrace_find_event(entry->type); 2605 if (event) { 2606 enum print_line_t ret = event->funcs->hex(iter, 0, event); 2607 if (ret != TRACE_TYPE_HANDLED) 2608 return ret; 2609 } 2610 2611 SEQ_PUT_FIELD_RET(s, newline); 2612 2613 return TRACE_TYPE_HANDLED; 2614 } 2615 2616 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2617 { 2618 struct trace_seq *s = &iter->seq; 2619 struct trace_entry *entry; 2620 struct trace_event *event; 2621 2622 entry = iter->ent; 2623 2624 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2625 SEQ_PUT_FIELD_RET(s, entry->pid); 2626 SEQ_PUT_FIELD_RET(s, iter->cpu); 2627 SEQ_PUT_FIELD_RET(s, iter->ts); 2628 } 2629 
	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events &&
	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
			      iter->cpu, iter->lost_events))
		return TRACE_TYPE_PARTIAL_LINE;

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
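/*
 * Illustrative note, not part of the original file: print_trace_line()
 * above resolves the output format in a fixed order -- the tracer's own
 * print_line() hook, the printk-msg-only shortcuts, then the bin, hex
 * and raw options, and finally the default formatter.  The last three
 * are ordinary trace_options flags, toggled from user space roughly
 * like this (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	echo raw   > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace
 *	echo noraw > /sys/kernel/debug/tracing/trace_options
 */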
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                     Takes a snapshot of the main buffer.\n");
	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                     (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                      is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
	seq_printf(m, "#                     Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
	seq_printf(m, "#                     (Doesn't have to be '2'; works with any number that\n");
	seq_printf(m, "#                      is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");

	seq_printf(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
2849 */ 2850 static inline int tracing_get_cpu(struct inode *inode) 2851 { 2852 if (inode->i_cdev) /* See trace_create_cpu_file() */ 2853 return (long)inode->i_cdev - 1; 2854 return RING_BUFFER_ALL_CPUS; 2855 } 2856 2857 static const struct seq_operations tracer_seq_ops = { 2858 .start = s_start, 2859 .next = s_next, 2860 .stop = s_stop, 2861 .show = s_show, 2862 }; 2863 2864 static struct trace_iterator * 2865 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2866 { 2867 struct trace_array *tr = inode->i_private; 2868 struct trace_iterator *iter; 2869 int cpu; 2870 2871 if (tracing_disabled) 2872 return ERR_PTR(-ENODEV); 2873 2874 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 2875 if (!iter) 2876 return ERR_PTR(-ENOMEM); 2877 2878 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), 2879 GFP_KERNEL); 2880 if (!iter->buffer_iter) 2881 goto release; 2882 2883 /* 2884 * We make a copy of the current tracer to avoid concurrent 2885 * changes on it while we are reading. 2886 */ 2887 mutex_lock(&trace_types_lock); 2888 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2889 if (!iter->trace) 2890 goto fail; 2891 2892 *iter->trace = *tr->current_trace; 2893 2894 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2895 goto fail; 2896 2897 iter->tr = tr; 2898 2899 #ifdef CONFIG_TRACER_MAX_TRACE 2900 /* Currently only the top directory has a snapshot */ 2901 if (tr->current_trace->print_max || snapshot) 2902 iter->trace_buffer = &tr->max_buffer; 2903 else 2904 #endif 2905 iter->trace_buffer = &tr->trace_buffer; 2906 iter->snapshot = snapshot; 2907 iter->pos = -1; 2908 iter->cpu_file = tracing_get_cpu(inode); 2909 mutex_init(&iter->mutex); 2910 2911 /* Notify the tracer early; before we stop tracing. */ 2912 if (iter->trace && iter->trace->open) 2913 iter->trace->open(iter); 2914 2915 /* Annotate start of buffers if we had overruns */ 2916 if (ring_buffer_overruns(iter->trace_buffer->buffer)) 2917 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2918 2919 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 2920 if (trace_clocks[tr->clock_id].in_ns) 2921 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2922 2923 /* stop the trace while dumping if we are not opening "snapshot" */ 2924 if (!iter->snapshot) 2925 tracing_stop_tr(tr); 2926 2927 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 2928 for_each_tracing_cpu(cpu) { 2929 iter->buffer_iter[cpu] = 2930 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2931 } 2932 ring_buffer_read_prepare_sync(); 2933 for_each_tracing_cpu(cpu) { 2934 ring_buffer_read_start(iter->buffer_iter[cpu]); 2935 tracing_iter_reset(iter, cpu); 2936 } 2937 } else { 2938 cpu = iter->cpu_file; 2939 iter->buffer_iter[cpu] = 2940 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu); 2941 ring_buffer_read_prepare_sync(); 2942 ring_buffer_read_start(iter->buffer_iter[cpu]); 2943 tracing_iter_reset(iter, cpu); 2944 } 2945 2946 mutex_unlock(&trace_types_lock); 2947 2948 return iter; 2949 2950 fail: 2951 mutex_unlock(&trace_types_lock); 2952 kfree(iter->trace); 2953 kfree(iter->buffer_iter); 2954 release: 2955 seq_release_private(inode, file); 2956 return ERR_PTR(-ENOMEM); 2957 } 2958 2959 int tracing_open_generic(struct inode *inode, struct file *filp) 2960 { 2961 if (tracing_disabled) 2962 return -ENODEV; 2963 2964 filp->private_data = inode->i_private; 2965 return 0; 2966 } 2967 2968 /* 2969 * Open and update trace_array ref count. 2970 * Must have the current trace_array passed to it. 
2971 */ 2972 static int tracing_open_generic_tr(struct inode *inode, struct file *filp) 2973 { 2974 struct trace_array *tr = inode->i_private; 2975 2976 if (tracing_disabled) 2977 return -ENODEV; 2978 2979 if (trace_array_get(tr) < 0) 2980 return -ENODEV; 2981 2982 filp->private_data = inode->i_private; 2983 2984 return 0; 2985 } 2986 2987 static int tracing_release(struct inode *inode, struct file *file) 2988 { 2989 struct trace_array *tr = inode->i_private; 2990 struct seq_file *m = file->private_data; 2991 struct trace_iterator *iter; 2992 int cpu; 2993 2994 if (!(file->f_mode & FMODE_READ)) { 2995 trace_array_put(tr); 2996 return 0; 2997 } 2998 2999 /* Writes do not use seq_file */ 3000 iter = m->private; 3001 mutex_lock(&trace_types_lock); 3002 3003 for_each_tracing_cpu(cpu) { 3004 if (iter->buffer_iter[cpu]) 3005 ring_buffer_read_finish(iter->buffer_iter[cpu]); 3006 } 3007 3008 if (iter->trace && iter->trace->close) 3009 iter->trace->close(iter); 3010 3011 if (!iter->snapshot) 3012 /* reenable tracing if it was previously enabled */ 3013 tracing_start_tr(tr); 3014 3015 __trace_array_put(tr); 3016 3017 mutex_unlock(&trace_types_lock); 3018 3019 mutex_destroy(&iter->mutex); 3020 free_cpumask_var(iter->started); 3021 kfree(iter->trace); 3022 kfree(iter->buffer_iter); 3023 seq_release_private(inode, file); 3024 3025 return 0; 3026 } 3027 3028 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 3029 { 3030 struct trace_array *tr = inode->i_private; 3031 3032 trace_array_put(tr); 3033 return 0; 3034 } 3035 3036 static int tracing_single_release_tr(struct inode *inode, struct file *file) 3037 { 3038 struct trace_array *tr = inode->i_private; 3039 3040 trace_array_put(tr); 3041 3042 return single_release(inode, file); 3043 } 3044 3045 static int tracing_open(struct inode *inode, struct file *file) 3046 { 3047 struct trace_array *tr = inode->i_private; 3048 struct trace_iterator *iter; 3049 int ret = 0; 3050 3051 if (trace_array_get(tr) < 0) 3052 return -ENODEV; 3053 3054 /* If this file was open for write, then erase contents */ 3055 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 3056 int cpu = tracing_get_cpu(inode); 3057 3058 if (cpu == RING_BUFFER_ALL_CPUS) 3059 tracing_reset_online_cpus(&tr->trace_buffer); 3060 else 3061 tracing_reset(&tr->trace_buffer, cpu); 3062 } 3063 3064 if (file->f_mode & FMODE_READ) { 3065 iter = __tracing_open(inode, file, false); 3066 if (IS_ERR(iter)) 3067 ret = PTR_ERR(iter); 3068 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 3069 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3070 } 3071 3072 if (ret < 0) 3073 trace_array_put(tr); 3074 3075 return ret; 3076 } 3077 3078 static void * 3079 t_next(struct seq_file *m, void *v, loff_t *pos) 3080 { 3081 struct tracer *t = v; 3082 3083 (*pos)++; 3084 3085 if (t) 3086 t = t->next; 3087 3088 return t; 3089 } 3090 3091 static void *t_start(struct seq_file *m, loff_t *pos) 3092 { 3093 struct tracer *t; 3094 loff_t l = 0; 3095 3096 mutex_lock(&trace_types_lock); 3097 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 3098 ; 3099 3100 return t; 3101 } 3102 3103 static void t_stop(struct seq_file *m, void *p) 3104 { 3105 mutex_unlock(&trace_types_lock); 3106 } 3107 3108 static int t_show(struct seq_file *m, void *v) 3109 { 3110 struct tracer *t = v; 3111 3112 if (!t) 3113 return 0; 3114 3115 seq_printf(m, "%s", t->name); 3116 if (t->next) 3117 seq_putc(m, ' '); 3118 else 3119 seq_putc(m, '\n'); 3120 3121 return 0; 3122 } 3123 3124 static const struct seq_operations 
show_traces_seq_ops = { 3125 .start = t_start, 3126 .next = t_next, 3127 .stop = t_stop, 3128 .show = t_show, 3129 }; 3130 3131 static int show_traces_open(struct inode *inode, struct file *file) 3132 { 3133 if (tracing_disabled) 3134 return -ENODEV; 3135 3136 return seq_open(file, &show_traces_seq_ops); 3137 } 3138 3139 static ssize_t 3140 tracing_write_stub(struct file *filp, const char __user *ubuf, 3141 size_t count, loff_t *ppos) 3142 { 3143 return count; 3144 } 3145 3146 static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 3147 { 3148 if (file->f_mode & FMODE_READ) 3149 return seq_lseek(file, offset, origin); 3150 else 3151 return 0; 3152 } 3153 3154 static const struct file_operations tracing_fops = { 3155 .open = tracing_open, 3156 .read = seq_read, 3157 .write = tracing_write_stub, 3158 .llseek = tracing_seek, 3159 .release = tracing_release, 3160 }; 3161 3162 static const struct file_operations show_traces_fops = { 3163 .open = show_traces_open, 3164 .read = seq_read, 3165 .release = seq_release, 3166 .llseek = seq_lseek, 3167 }; 3168 3169 /* 3170 * The tracer itself will not take this lock, but still we want 3171 * to provide a consistent cpumask to user-space: 3172 */ 3173 static DEFINE_MUTEX(tracing_cpumask_update_lock); 3174 3175 /* 3176 * Temporary storage for the character representation of the 3177 * CPU bitmask (and one more byte for the newline): 3178 */ 3179 static char mask_str[NR_CPUS + 1]; 3180 3181 static ssize_t 3182 tracing_cpumask_read(struct file *filp, char __user *ubuf, 3183 size_t count, loff_t *ppos) 3184 { 3185 struct trace_array *tr = file_inode(filp)->i_private; 3186 int len; 3187 3188 mutex_lock(&tracing_cpumask_update_lock); 3189 3190 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask); 3191 if (count - len < 2) { 3192 count = -EINVAL; 3193 goto out_err; 3194 } 3195 len += sprintf(mask_str + len, "\n"); 3196 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 3197 3198 out_err: 3199 mutex_unlock(&tracing_cpumask_update_lock); 3200 3201 return count; 3202 } 3203 3204 static ssize_t 3205 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 3206 size_t count, loff_t *ppos) 3207 { 3208 struct trace_array *tr = file_inode(filp)->i_private; 3209 cpumask_var_t tracing_cpumask_new; 3210 int err, cpu; 3211 3212 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 3213 return -ENOMEM; 3214 3215 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 3216 if (err) 3217 goto err_unlock; 3218 3219 mutex_lock(&tracing_cpumask_update_lock); 3220 3221 local_irq_disable(); 3222 arch_spin_lock(&ftrace_max_lock); 3223 for_each_tracing_cpu(cpu) { 3224 /* 3225 * Increase/decrease the disabled counter if we are 3226 * about to flip a bit in the cpumask: 3227 */ 3228 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3229 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3230 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3231 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); 3232 } 3233 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3234 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3235 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3236 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); 3237 } 3238 } 3239 arch_spin_unlock(&ftrace_max_lock); 3240 local_irq_enable(); 3241 3242 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 3243 3244 mutex_unlock(&tracing_cpumask_update_lock); 3245 free_cpumask_var(tracing_cpumask_new); 3246 3247 return 
count; 3248 3249 err_unlock: 3250 free_cpumask_var(tracing_cpumask_new); 3251 3252 return err; 3253 } 3254 3255 static const struct file_operations tracing_cpumask_fops = { 3256 .open = tracing_open_generic_tr, 3257 .read = tracing_cpumask_read, 3258 .write = tracing_cpumask_write, 3259 .release = tracing_release_generic_tr, 3260 .llseek = generic_file_llseek, 3261 }; 3262 3263 static int tracing_trace_options_show(struct seq_file *m, void *v) 3264 { 3265 struct tracer_opt *trace_opts; 3266 struct trace_array *tr = m->private; 3267 u32 tracer_flags; 3268 int i; 3269 3270 mutex_lock(&trace_types_lock); 3271 tracer_flags = tr->current_trace->flags->val; 3272 trace_opts = tr->current_trace->flags->opts; 3273 3274 for (i = 0; trace_options[i]; i++) { 3275 if (trace_flags & (1 << i)) 3276 seq_printf(m, "%s\n", trace_options[i]); 3277 else 3278 seq_printf(m, "no%s\n", trace_options[i]); 3279 } 3280 3281 for (i = 0; trace_opts[i].name; i++) { 3282 if (tracer_flags & trace_opts[i].bit) 3283 seq_printf(m, "%s\n", trace_opts[i].name); 3284 else 3285 seq_printf(m, "no%s\n", trace_opts[i].name); 3286 } 3287 mutex_unlock(&trace_types_lock); 3288 3289 return 0; 3290 } 3291 3292 static int __set_tracer_option(struct tracer *trace, 3293 struct tracer_flags *tracer_flags, 3294 struct tracer_opt *opts, int neg) 3295 { 3296 int ret; 3297 3298 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 3299 if (ret) 3300 return ret; 3301 3302 if (neg) 3303 tracer_flags->val &= ~opts->bit; 3304 else 3305 tracer_flags->val |= opts->bit; 3306 return 0; 3307 } 3308 3309 /* Try to assign a tracer specific option */ 3310 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 3311 { 3312 struct tracer_flags *tracer_flags = trace->flags; 3313 struct tracer_opt *opts = NULL; 3314 int i; 3315 3316 for (i = 0; tracer_flags->opts[i].name; i++) { 3317 opts = &tracer_flags->opts[i]; 3318 3319 if (strcmp(cmp, opts->name) == 0) 3320 return __set_tracer_option(trace, trace->flags, 3321 opts, neg); 3322 } 3323 3324 return -EINVAL; 3325 } 3326 3327 /* Some tracers require overwrite to stay enabled */ 3328 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 3329 { 3330 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 3331 return -1; 3332 3333 return 0; 3334 } 3335 3336 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 3337 { 3338 /* do nothing if flag is already set */ 3339 if (!!(trace_flags & mask) == !!enabled) 3340 return 0; 3341 3342 /* Give the tracer a chance to approve the change */ 3343 if (tr->current_trace->flag_changed) 3344 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) 3345 return -EINVAL; 3346 3347 if (enabled) 3348 trace_flags |= mask; 3349 else 3350 trace_flags &= ~mask; 3351 3352 if (mask == TRACE_ITER_RECORD_CMD) 3353 trace_event_enable_cmd_record(enabled); 3354 3355 if (mask == TRACE_ITER_OVERWRITE) { 3356 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled); 3357 #ifdef CONFIG_TRACER_MAX_TRACE 3358 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 3359 #endif 3360 } 3361 3362 if (mask == TRACE_ITER_PRINTK) 3363 trace_printk_start_stop_comm(enabled); 3364 3365 return 0; 3366 } 3367 3368 static int trace_set_options(struct trace_array *tr, char *option) 3369 { 3370 char *cmp; 3371 int neg = 0; 3372 int ret = -ENODEV; 3373 int i; 3374 3375 cmp = strstrip(option); 3376 3377 if (strncmp(cmp, "no", 2) == 0) { 3378 neg = 1; 3379 cmp += 2; 3380 } 3381 3382 mutex_lock(&trace_types_lock); 3383 3384 for 
(i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr->current_trace, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
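/*
 * Illustrative sketch, not part of the original file: trace_set_options()
 * above accepts either one of the generic flags in trace_options[] or a
 * tracer-specific option, and a "no" prefix clears the flag instead of
 * setting it.  A typical interaction from user space looks roughly like
 * this (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	echo print-parent > /sys/kernel/debug/tracing/trace_options
 *	echo nooverwrite  > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace_options
 */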
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
	"    local:   Per cpu clock but may not be synced across CPUs\n"
	"    global:  Synced across CPUs but slows tracing down.\n"
	"    counter: Not a clock, but just an increment\n"
	"    uptime:  Jiffy counter from time of boot\n"
	"    perf:    Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"    x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by prefixing 'no' to the option name\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these functions\n"
	"    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"    modules: Can select a group via module\n"
	"     Format: :mod:<module-name>\n"
	"    example: echo :mod:ext3 > set_ftrace_filter\n"
	"   triggers: a command to perform when function is hit\n"
	"     Format: <function>:<trigger>[:count]\n"
	"    trigger: traceon, traceoff\n"
	"             enable_event:<system>:<event>\n"
	"             disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"             stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"             snapshot\n"
#endif
	"    example: echo do_fault:traceoff > set_ftrace_filter\n"
	"             echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"    The first one will disable tracing every time do_fault is hit\n"
	"    The second will disable tracing at most 3 times when do_trap is hit\n"
	"    The first time do_trap is hit and it disables tracing, the counter\n"
	"    will decrement to 2. If tracing is already disabled, the counter\n"
	"    will not decrement. It only decrements when the trigger actually fires\n"
	"    To remove a trigger without a count:\n"
	"      echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"    To remove a trigger with a count:\n"
	"      echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"    modules: Can select a group via module command :mod:\n"
	"    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
	"\t\t\t  Read the contents for more information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	char *buf_comm;
	char *file_buf;
	char *buf;
	int len = 0;
	int pid;
	int i;

	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
	if (!file_buf)
		return -ENOMEM;

	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
	if (!buf_comm) {
		kfree(file_buf);
		return -ENOMEM;
	}

	buf = file_buf;

	for (i = 0; i < SAVED_CMDLINES; i++) {
		int r;

		pid = map_cmdline_to_pid[i];
		if (pid == -1 || pid == NO_CMDLINE_MAP)
			continue;

		trace_find_cmdline(pid, buf_comm);
		r = sprintf(buf, "%d %s\n", pid, buf_comm);
		buf += r;
		len += r;
	}

	len =
simple_read_from_buffer(ubuf, cnt, ppos, 3585 file_buf, len); 3586 3587 kfree(file_buf); 3588 kfree(buf_comm); 3589 3590 return len; 3591 } 3592 3593 static const struct file_operations tracing_saved_cmdlines_fops = { 3594 .open = tracing_open_generic, 3595 .read = tracing_saved_cmdlines_read, 3596 .llseek = generic_file_llseek, 3597 }; 3598 3599 static ssize_t 3600 tracing_set_trace_read(struct file *filp, char __user *ubuf, 3601 size_t cnt, loff_t *ppos) 3602 { 3603 struct trace_array *tr = filp->private_data; 3604 char buf[MAX_TRACER_SIZE+2]; 3605 int r; 3606 3607 mutex_lock(&trace_types_lock); 3608 r = sprintf(buf, "%s\n", tr->current_trace->name); 3609 mutex_unlock(&trace_types_lock); 3610 3611 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3612 } 3613 3614 int tracer_init(struct tracer *t, struct trace_array *tr) 3615 { 3616 tracing_reset_online_cpus(&tr->trace_buffer); 3617 return t->init(tr); 3618 } 3619 3620 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val) 3621 { 3622 int cpu; 3623 3624 for_each_tracing_cpu(cpu) 3625 per_cpu_ptr(buf->data, cpu)->entries = val; 3626 } 3627 3628 #ifdef CONFIG_TRACER_MAX_TRACE 3629 /* resize @tr's buffer to the size of @size_tr's entries */ 3630 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, 3631 struct trace_buffer *size_buf, int cpu_id) 3632 { 3633 int cpu, ret = 0; 3634 3635 if (cpu_id == RING_BUFFER_ALL_CPUS) { 3636 for_each_tracing_cpu(cpu) { 3637 ret = ring_buffer_resize(trace_buf->buffer, 3638 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 3639 if (ret < 0) 3640 break; 3641 per_cpu_ptr(trace_buf->data, cpu)->entries = 3642 per_cpu_ptr(size_buf->data, cpu)->entries; 3643 } 3644 } else { 3645 ret = ring_buffer_resize(trace_buf->buffer, 3646 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 3647 if (ret == 0) 3648 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 3649 per_cpu_ptr(size_buf->data, cpu_id)->entries; 3650 } 3651 3652 return ret; 3653 } 3654 #endif /* CONFIG_TRACER_MAX_TRACE */ 3655 3656 static int __tracing_resize_ring_buffer(struct trace_array *tr, 3657 unsigned long size, int cpu) 3658 { 3659 int ret; 3660 3661 /* 3662 * If kernel or user changes the size of the ring buffer 3663 * we use the size that was given, and we can forget about 3664 * expanding it later. 3665 */ 3666 ring_buffer_expanded = true; 3667 3668 /* May be called before buffers are initialized */ 3669 if (!tr->trace_buffer.buffer) 3670 return 0; 3671 3672 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu); 3673 if (ret < 0) 3674 return ret; 3675 3676 #ifdef CONFIG_TRACER_MAX_TRACE 3677 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || 3678 !tr->current_trace->use_max_tr) 3679 goto out; 3680 3681 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 3682 if (ret < 0) { 3683 int r = resize_buffer_duplicate_size(&tr->trace_buffer, 3684 &tr->trace_buffer, cpu); 3685 if (r < 0) { 3686 /* 3687 * AARGH! We are left with different 3688 * size max buffer!!!! 3689 * The max buffer is our "snapshot" buffer. 3690 * When a tracer needs a snapshot (one of the 3691 * latency tracers), it swaps the max buffer 3692 * with the saved snap shot. We succeeded to 3693 * update the size of the main buffer, but failed to 3694 * update the size of the max buffer. But when we tried 3695 * to reset the main buffer to the original size, we 3696 * failed there too. This is very unlikely to 3697 * happen, but if it does, warn and kill all 3698 * tracing. 
3699 */ 3700 WARN_ON(1); 3701 tracing_disabled = 1; 3702 } 3703 return ret; 3704 } 3705 3706 if (cpu == RING_BUFFER_ALL_CPUS) 3707 set_buffer_entries(&tr->max_buffer, size); 3708 else 3709 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; 3710 3711 out: 3712 #endif /* CONFIG_TRACER_MAX_TRACE */ 3713 3714 if (cpu == RING_BUFFER_ALL_CPUS) 3715 set_buffer_entries(&tr->trace_buffer, size); 3716 else 3717 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; 3718 3719 return ret; 3720 } 3721 3722 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 3723 unsigned long size, int cpu_id) 3724 { 3725 int ret = size; 3726 3727 mutex_lock(&trace_types_lock); 3728 3729 if (cpu_id != RING_BUFFER_ALL_CPUS) { 3730 /* make sure, this cpu is enabled in the mask */ 3731 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 3732 ret = -EINVAL; 3733 goto out; 3734 } 3735 } 3736 3737 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); 3738 if (ret < 0) 3739 ret = -ENOMEM; 3740 3741 out: 3742 mutex_unlock(&trace_types_lock); 3743 3744 return ret; 3745 } 3746 3747 3748 /** 3749 * tracing_update_buffers - used by tracing facility to expand ring buffers 3750 * 3751 * To save on memory when the tracing is never used on a system with it 3752 * configured in. The ring buffers are set to a minimum size. But once 3753 * a user starts to use the tracing facility, then they need to grow 3754 * to their default size. 3755 * 3756 * This function is to be called when a tracer is about to be used. 3757 */ 3758 int tracing_update_buffers(void) 3759 { 3760 int ret = 0; 3761 3762 mutex_lock(&trace_types_lock); 3763 if (!ring_buffer_expanded) 3764 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, 3765 RING_BUFFER_ALL_CPUS); 3766 mutex_unlock(&trace_types_lock); 3767 3768 return ret; 3769 } 3770 3771 struct trace_option_dentry; 3772 3773 static struct trace_option_dentry * 3774 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 3775 3776 static void 3777 destroy_trace_option_files(struct trace_option_dentry *topts); 3778 3779 static int tracing_set_tracer(const char *buf) 3780 { 3781 static struct trace_option_dentry *topts; 3782 struct trace_array *tr = &global_trace; 3783 struct tracer *t; 3784 #ifdef CONFIG_TRACER_MAX_TRACE 3785 bool had_max_tr; 3786 #endif 3787 int ret = 0; 3788 3789 mutex_lock(&trace_types_lock); 3790 3791 if (!ring_buffer_expanded) { 3792 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 3793 RING_BUFFER_ALL_CPUS); 3794 if (ret < 0) 3795 goto out; 3796 ret = 0; 3797 } 3798 3799 for (t = trace_types; t; t = t->next) { 3800 if (strcmp(t->name, buf) == 0) 3801 break; 3802 } 3803 if (!t) { 3804 ret = -EINVAL; 3805 goto out; 3806 } 3807 if (t == tr->current_trace) 3808 goto out; 3809 3810 trace_branch_disable(); 3811 3812 tr->current_trace->enabled = false; 3813 3814 if (tr->current_trace->reset) 3815 tr->current_trace->reset(tr); 3816 3817 /* Current trace needs to be nop_trace before synchronize_sched */ 3818 tr->current_trace = &nop_trace; 3819 3820 #ifdef CONFIG_TRACER_MAX_TRACE 3821 had_max_tr = tr->allocated_snapshot; 3822 3823 if (had_max_tr && !t->use_max_tr) { 3824 /* 3825 * We need to make sure that the update_max_tr sees that 3826 * current_trace changed to nop_trace to keep it from 3827 * swapping the buffers after we resize it. 3828 * The update_max_tr is called from interrupts disabled 3829 * so a synchronized_sched() is sufficient. 
3830 */ 3831 synchronize_sched(); 3832 free_snapshot(tr); 3833 } 3834 #endif 3835 destroy_trace_option_files(topts); 3836 3837 topts = create_trace_option_files(tr, t); 3838 3839 #ifdef CONFIG_TRACER_MAX_TRACE 3840 if (t->use_max_tr && !had_max_tr) { 3841 ret = alloc_snapshot(tr); 3842 if (ret < 0) 3843 goto out; 3844 } 3845 #endif 3846 3847 if (t->init) { 3848 ret = tracer_init(t, tr); 3849 if (ret) 3850 goto out; 3851 } 3852 3853 tr->current_trace = t; 3854 tr->current_trace->enabled = true; 3855 trace_branch_enable(tr); 3856 out: 3857 mutex_unlock(&trace_types_lock); 3858 3859 return ret; 3860 } 3861 3862 static ssize_t 3863 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 3864 size_t cnt, loff_t *ppos) 3865 { 3866 char buf[MAX_TRACER_SIZE+1]; 3867 int i; 3868 size_t ret; 3869 int err; 3870 3871 ret = cnt; 3872 3873 if (cnt > MAX_TRACER_SIZE) 3874 cnt = MAX_TRACER_SIZE; 3875 3876 if (copy_from_user(&buf, ubuf, cnt)) 3877 return -EFAULT; 3878 3879 buf[cnt] = 0; 3880 3881 /* strip ending whitespace. */ 3882 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 3883 buf[i] = 0; 3884 3885 err = tracing_set_tracer(buf); 3886 if (err) 3887 return err; 3888 3889 *ppos += ret; 3890 3891 return ret; 3892 } 3893 3894 static ssize_t 3895 tracing_max_lat_read(struct file *filp, char __user *ubuf, 3896 size_t cnt, loff_t *ppos) 3897 { 3898 unsigned long *ptr = filp->private_data; 3899 char buf[64]; 3900 int r; 3901 3902 r = snprintf(buf, sizeof(buf), "%ld\n", 3903 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 3904 if (r > sizeof(buf)) 3905 r = sizeof(buf); 3906 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3907 } 3908 3909 static ssize_t 3910 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 3911 size_t cnt, loff_t *ppos) 3912 { 3913 unsigned long *ptr = filp->private_data; 3914 unsigned long val; 3915 int ret; 3916 3917 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3918 if (ret) 3919 return ret; 3920 3921 *ptr = val * 1000; 3922 3923 return cnt; 3924 } 3925 3926 static int tracing_open_pipe(struct inode *inode, struct file *filp) 3927 { 3928 struct trace_array *tr = inode->i_private; 3929 struct trace_iterator *iter; 3930 int ret = 0; 3931 3932 if (tracing_disabled) 3933 return -ENODEV; 3934 3935 if (trace_array_get(tr) < 0) 3936 return -ENODEV; 3937 3938 mutex_lock(&trace_types_lock); 3939 3940 /* create a buffer to store the information to pass to userspace */ 3941 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3942 if (!iter) { 3943 ret = -ENOMEM; 3944 __trace_array_put(tr); 3945 goto out; 3946 } 3947 3948 /* 3949 * We make a copy of the current tracer to avoid concurrent 3950 * changes on it while we are reading. 3951 */ 3952 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 3953 if (!iter->trace) { 3954 ret = -ENOMEM; 3955 goto fail; 3956 } 3957 *iter->trace = *tr->current_trace; 3958 3959 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3960 ret = -ENOMEM; 3961 goto fail; 3962 } 3963 3964 /* trace pipe does not show start of buffer */ 3965 cpumask_setall(iter->started); 3966 3967 if (trace_flags & TRACE_ITER_LATENCY_FMT) 3968 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3969 3970 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ 3971 if (trace_clocks[tr->clock_id].in_ns) 3972 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3973 3974 iter->tr = tr; 3975 iter->trace_buffer = &tr->trace_buffer; 3976 iter->cpu_file = tracing_get_cpu(inode); 3977 mutex_init(&iter->mutex); 3978 filp->private_data = iter; 3979 3980 if (iter->trace->pipe_open) 3981 iter->trace->pipe_open(iter); 3982 3983 nonseekable_open(inode, filp); 3984 out: 3985 mutex_unlock(&trace_types_lock); 3986 return ret; 3987 3988 fail: 3989 kfree(iter->trace); 3990 kfree(iter); 3991 __trace_array_put(tr); 3992 mutex_unlock(&trace_types_lock); 3993 return ret; 3994 } 3995 3996 static int tracing_release_pipe(struct inode *inode, struct file *file) 3997 { 3998 struct trace_iterator *iter = file->private_data; 3999 struct trace_array *tr = inode->i_private; 4000 4001 mutex_lock(&trace_types_lock); 4002 4003 if (iter->trace->pipe_close) 4004 iter->trace->pipe_close(iter); 4005 4006 mutex_unlock(&trace_types_lock); 4007 4008 free_cpumask_var(iter->started); 4009 mutex_destroy(&iter->mutex); 4010 kfree(iter->trace); 4011 kfree(iter); 4012 4013 trace_array_put(tr); 4014 4015 return 0; 4016 } 4017 4018 static unsigned int 4019 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 4020 { 4021 /* Iterators are static, they should be filled or empty */ 4022 if (trace_buffer_iter(iter, iter->cpu_file)) 4023 return POLLIN | POLLRDNORM; 4024 4025 if (trace_flags & TRACE_ITER_BLOCK) 4026 /* 4027 * Always select as readable when in blocking mode 4028 */ 4029 return POLLIN | POLLRDNORM; 4030 else 4031 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file, 4032 filp, poll_table); 4033 } 4034 4035 static unsigned int 4036 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 4037 { 4038 struct trace_iterator *iter = filp->private_data; 4039 4040 return trace_poll(iter, filp, poll_table); 4041 } 4042 4043 /* 4044 * This is a make-shift waitqueue. 4045 * A tracer might use this callback on some rare cases: 4046 * 4047 * 1) the current tracer might hold the runqueue lock when it wakes up 4048 * a reader, hence a deadlock (sched, function, and function graph tracers) 4049 * 2) the function tracers, trace all functions, we don't want 4050 * the overhead of calling wake_up and friends 4051 * (and tracing them too) 4052 * 4053 * Anyway, this is really very primitive wakeup. 4054 */ 4055 void poll_wait_pipe(struct trace_iterator *iter) 4056 { 4057 set_current_state(TASK_INTERRUPTIBLE); 4058 /* sleep for 100 msecs, and try again. */ 4059 schedule_timeout(HZ / 10); 4060 } 4061 4062 /* Must be called with trace_types_lock mutex held. */ 4063 static int tracing_wait_pipe(struct file *filp) 4064 { 4065 struct trace_iterator *iter = filp->private_data; 4066 4067 while (trace_empty(iter)) { 4068 4069 if ((filp->f_flags & O_NONBLOCK)) { 4070 return -EAGAIN; 4071 } 4072 4073 mutex_unlock(&iter->mutex); 4074 4075 iter->trace->wait_pipe(iter); 4076 4077 mutex_lock(&iter->mutex); 4078 4079 if (signal_pending(current)) 4080 return -EINTR; 4081 4082 /* 4083 * We block until we read something and tracing is disabled. 4084 * We still block if tracing is disabled, but we have never 4085 * read anything. This allows a user to cat this file, and 4086 * then enable tracing. But after we have read something, 4087 * we give an EOF when tracing is again disabled. 4088 * 4089 * iter->pos will be 0 if we haven't read anything. 4090 */ 4091 if (!tracing_is_on() && iter->pos) 4092 break; 4093 } 4094 4095 return 1; 4096 } 4097 4098 /* 4099 * Consumer reader. 
4100 */ 4101 static ssize_t 4102 tracing_read_pipe(struct file *filp, char __user *ubuf, 4103 size_t cnt, loff_t *ppos) 4104 { 4105 struct trace_iterator *iter = filp->private_data; 4106 struct trace_array *tr = iter->tr; 4107 ssize_t sret; 4108 4109 /* return any leftover data */ 4110 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4111 if (sret != -EBUSY) 4112 return sret; 4113 4114 trace_seq_init(&iter->seq); 4115 4116 /* copy the tracer to avoid using a global lock all around */ 4117 mutex_lock(&trace_types_lock); 4118 if (unlikely(iter->trace->name != tr->current_trace->name)) 4119 *iter->trace = *tr->current_trace; 4120 mutex_unlock(&trace_types_lock); 4121 4122 /* 4123 * Avoid more than one consumer on a single file descriptor 4124 * This is just a matter of traces coherency, the ring buffer itself 4125 * is protected. 4126 */ 4127 mutex_lock(&iter->mutex); 4128 if (iter->trace->read) { 4129 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 4130 if (sret) 4131 goto out; 4132 } 4133 4134 waitagain: 4135 sret = tracing_wait_pipe(filp); 4136 if (sret <= 0) 4137 goto out; 4138 4139 /* stop when tracing is finished */ 4140 if (trace_empty(iter)) { 4141 sret = 0; 4142 goto out; 4143 } 4144 4145 if (cnt >= PAGE_SIZE) 4146 cnt = PAGE_SIZE - 1; 4147 4148 /* reset all but tr, trace, and overruns */ 4149 memset(&iter->seq, 0, 4150 sizeof(struct trace_iterator) - 4151 offsetof(struct trace_iterator, seq)); 4152 cpumask_clear(iter->started); 4153 iter->pos = -1; 4154 4155 trace_event_read_lock(); 4156 trace_access_lock(iter->cpu_file); 4157 while (trace_find_next_entry_inc(iter) != NULL) { 4158 enum print_line_t ret; 4159 int len = iter->seq.len; 4160 4161 ret = print_trace_line(iter); 4162 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4163 /* don't print partial lines */ 4164 iter->seq.len = len; 4165 break; 4166 } 4167 if (ret != TRACE_TYPE_NO_CONSUME) 4168 trace_consume(iter); 4169 4170 if (iter->seq.len >= cnt) 4171 break; 4172 4173 /* 4174 * Setting the full flag means we reached the trace_seq buffer 4175 * size and we should leave by partial output condition above. 4176 * One of the trace_seq_* functions is not used properly. 4177 */ 4178 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 4179 iter->ent->type); 4180 } 4181 trace_access_unlock(iter->cpu_file); 4182 trace_event_read_unlock(); 4183 4184 /* Now copy what we have to the user */ 4185 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 4186 if (iter->seq.readpos >= iter->seq.len) 4187 trace_seq_init(&iter->seq); 4188 4189 /* 4190 * If there was nothing to send to user, in spite of consuming trace 4191 * entries, go back to wait for more entries. 
4192 */ 4193 if (sret == -EBUSY) 4194 goto waitagain; 4195 4196 out: 4197 mutex_unlock(&iter->mutex); 4198 4199 return sret; 4200 } 4201 4202 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 4203 struct pipe_buffer *buf) 4204 { 4205 __free_page(buf->page); 4206 } 4207 4208 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 4209 unsigned int idx) 4210 { 4211 __free_page(spd->pages[idx]); 4212 } 4213 4214 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 4215 .can_merge = 0, 4216 .map = generic_pipe_buf_map, 4217 .unmap = generic_pipe_buf_unmap, 4218 .confirm = generic_pipe_buf_confirm, 4219 .release = tracing_pipe_buf_release, 4220 .steal = generic_pipe_buf_steal, 4221 .get = generic_pipe_buf_get, 4222 }; 4223 4224 static size_t 4225 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 4226 { 4227 size_t count; 4228 int ret; 4229 4230 /* Seq buffer is page-sized, exactly what we need. */ 4231 for (;;) { 4232 count = iter->seq.len; 4233 ret = print_trace_line(iter); 4234 count = iter->seq.len - count; 4235 if (rem < count) { 4236 rem = 0; 4237 iter->seq.len -= count; 4238 break; 4239 } 4240 if (ret == TRACE_TYPE_PARTIAL_LINE) { 4241 iter->seq.len -= count; 4242 break; 4243 } 4244 4245 if (ret != TRACE_TYPE_NO_CONSUME) 4246 trace_consume(iter); 4247 rem -= count; 4248 if (!trace_find_next_entry_inc(iter)) { 4249 rem = 0; 4250 iter->ent = NULL; 4251 break; 4252 } 4253 } 4254 4255 return rem; 4256 } 4257 4258 static ssize_t tracing_splice_read_pipe(struct file *filp, 4259 loff_t *ppos, 4260 struct pipe_inode_info *pipe, 4261 size_t len, 4262 unsigned int flags) 4263 { 4264 struct page *pages_def[PIPE_DEF_BUFFERS]; 4265 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 4266 struct trace_iterator *iter = filp->private_data; 4267 struct splice_pipe_desc spd = { 4268 .pages = pages_def, 4269 .partial = partial_def, 4270 .nr_pages = 0, /* This gets updated below. */ 4271 .nr_pages_max = PIPE_DEF_BUFFERS, 4272 .flags = flags, 4273 .ops = &tracing_pipe_buf_ops, 4274 .spd_release = tracing_spd_release_pipe, 4275 }; 4276 struct trace_array *tr = iter->tr; 4277 ssize_t ret; 4278 size_t rem; 4279 unsigned int i; 4280 4281 if (splice_grow_spd(pipe, &spd)) 4282 return -ENOMEM; 4283 4284 /* copy the tracer to avoid using a global lock all around */ 4285 mutex_lock(&trace_types_lock); 4286 if (unlikely(iter->trace->name != tr->current_trace->name)) 4287 *iter->trace = *tr->current_trace; 4288 mutex_unlock(&trace_types_lock); 4289 4290 mutex_lock(&iter->mutex); 4291 4292 if (iter->trace->splice_read) { 4293 ret = iter->trace->splice_read(iter, filp, 4294 ppos, pipe, len, flags); 4295 if (ret) 4296 goto out_err; 4297 } 4298 4299 ret = tracing_wait_pipe(filp); 4300 if (ret <= 0) 4301 goto out_err; 4302 4303 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 4304 ret = -EFAULT; 4305 goto out_err; 4306 } 4307 4308 trace_event_read_lock(); 4309 trace_access_lock(iter->cpu_file); 4310 4311 /* Fill as many pages as possible. */ 4312 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 4313 spd.pages[i] = alloc_page(GFP_KERNEL); 4314 if (!spd.pages[i]) 4315 break; 4316 4317 rem = tracing_fill_pipe_page(rem, iter); 4318 4319 /* Copy the data into the page, so we can start over. 
*/ 4320 ret = trace_seq_to_buffer(&iter->seq, 4321 page_address(spd.pages[i]), 4322 iter->seq.len); 4323 if (ret < 0) { 4324 __free_page(spd.pages[i]); 4325 break; 4326 } 4327 spd.partial[i].offset = 0; 4328 spd.partial[i].len = iter->seq.len; 4329 4330 trace_seq_init(&iter->seq); 4331 } 4332 4333 trace_access_unlock(iter->cpu_file); 4334 trace_event_read_unlock(); 4335 mutex_unlock(&iter->mutex); 4336 4337 spd.nr_pages = i; 4338 4339 ret = splice_to_pipe(pipe, &spd); 4340 out: 4341 splice_shrink_spd(&spd); 4342 return ret; 4343 4344 out_err: 4345 mutex_unlock(&iter->mutex); 4346 goto out; 4347 } 4348 4349 static ssize_t 4350 tracing_entries_read(struct file *filp, char __user *ubuf, 4351 size_t cnt, loff_t *ppos) 4352 { 4353 struct inode *inode = file_inode(filp); 4354 struct trace_array *tr = inode->i_private; 4355 int cpu = tracing_get_cpu(inode); 4356 char buf[64]; 4357 int r = 0; 4358 ssize_t ret; 4359 4360 mutex_lock(&trace_types_lock); 4361 4362 if (cpu == RING_BUFFER_ALL_CPUS) { 4363 int cpu, buf_size_same; 4364 unsigned long size; 4365 4366 size = 0; 4367 buf_size_same = 1; 4368 /* check if all cpu sizes are same */ 4369 for_each_tracing_cpu(cpu) { 4370 /* fill in the size from first enabled cpu */ 4371 if (size == 0) 4372 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; 4373 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { 4374 buf_size_same = 0; 4375 break; 4376 } 4377 } 4378 4379 if (buf_size_same) { 4380 if (!ring_buffer_expanded) 4381 r = sprintf(buf, "%lu (expanded: %lu)\n", 4382 size >> 10, 4383 trace_buf_size >> 10); 4384 else 4385 r = sprintf(buf, "%lu\n", size >> 10); 4386 } else 4387 r = sprintf(buf, "X\n"); 4388 } else 4389 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); 4390 4391 mutex_unlock(&trace_types_lock); 4392 4393 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4394 return ret; 4395 } 4396 4397 static ssize_t 4398 tracing_entries_write(struct file *filp, const char __user *ubuf, 4399 size_t cnt, loff_t *ppos) 4400 { 4401 struct inode *inode = file_inode(filp); 4402 struct trace_array *tr = inode->i_private; 4403 unsigned long val; 4404 int ret; 4405 4406 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4407 if (ret) 4408 return ret; 4409 4410 /* must have at least 1 entry */ 4411 if (!val) 4412 return -EINVAL; 4413 4414 /* value is in KB */ 4415 val <<= 10; 4416 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 4417 if (ret < 0) 4418 return ret; 4419 4420 *ppos += cnt; 4421 4422 return cnt; 4423 } 4424 4425 static ssize_t 4426 tracing_total_entries_read(struct file *filp, char __user *ubuf, 4427 size_t cnt, loff_t *ppos) 4428 { 4429 struct trace_array *tr = filp->private_data; 4430 char buf[64]; 4431 int r, cpu; 4432 unsigned long size = 0, expanded_size = 0; 4433 4434 mutex_lock(&trace_types_lock); 4435 for_each_tracing_cpu(cpu) { 4436 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; 4437 if (!ring_buffer_expanded) 4438 expanded_size += trace_buf_size >> 10; 4439 } 4440 if (ring_buffer_expanded) 4441 r = sprintf(buf, "%lu\n", size); 4442 else 4443 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 4444 mutex_unlock(&trace_types_lock); 4445 4446 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4447 } 4448 4449 static ssize_t 4450 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 4451 size_t cnt, loff_t *ppos) 4452 { 4453 /* 4454 * There is no need to read what the user has written, this function 4455 * is just to make sure 
that there is no error when "echo" is used 4456 */ 4457 4458 *ppos += cnt; 4459 4460 return cnt; 4461 } 4462 4463 static int 4464 tracing_free_buffer_release(struct inode *inode, struct file *filp) 4465 { 4466 struct trace_array *tr = inode->i_private; 4467 4468 /* disable tracing? */ 4469 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 4470 tracer_tracing_off(tr); 4471 /* resize the ring buffer to 0 */ 4472 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 4473 4474 trace_array_put(tr); 4475 4476 return 0; 4477 } 4478 4479 static ssize_t 4480 tracing_mark_write(struct file *filp, const char __user *ubuf, 4481 size_t cnt, loff_t *fpos) 4482 { 4483 unsigned long addr = (unsigned long)ubuf; 4484 struct trace_array *tr = filp->private_data; 4485 struct ring_buffer_event *event; 4486 struct ring_buffer *buffer; 4487 struct print_entry *entry; 4488 unsigned long irq_flags; 4489 struct page *pages[2]; 4490 void *map_page[2]; 4491 int nr_pages = 1; 4492 ssize_t written; 4493 int offset; 4494 int size; 4495 int len; 4496 int ret; 4497 int i; 4498 4499 if (tracing_disabled) 4500 return -EINVAL; 4501 4502 if (!(trace_flags & TRACE_ITER_MARKERS)) 4503 return -EINVAL; 4504 4505 if (cnt > TRACE_BUF_SIZE) 4506 cnt = TRACE_BUF_SIZE; 4507 4508 /* 4509 * Userspace is injecting traces into the kernel trace buffer. 4510 * We want to be as non-intrusive as possible. 4511 * To do so, we do not want to allocate any special buffers 4512 * or take any locks, but instead write the userspace data 4513 * straight into the ring buffer. 4514 * 4515 * First we need to pin the userspace buffer into memory, 4516 * which it most likely already is, because the task just 4517 * referenced it. But there is no guarantee. By using get_user_pages_fast() 4518 * and kmap_atomic()/kunmap_atomic() we can get access to the 4519 * pages directly, and then write the data straight into the 4520 * ring buffer.
4521 */ 4522 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 4523 4524 /* check if we cross pages */ 4525 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) 4526 nr_pages = 2; 4527 4528 offset = addr & (PAGE_SIZE - 1); 4529 addr &= PAGE_MASK; 4530 4531 ret = get_user_pages_fast(addr, nr_pages, 0, pages); 4532 if (ret < nr_pages) { 4533 while (--ret >= 0) 4534 put_page(pages[ret]); 4535 written = -EFAULT; 4536 goto out; 4537 } 4538 4539 for (i = 0; i < nr_pages; i++) 4540 map_page[i] = kmap_atomic(pages[i]); 4541 4542 local_save_flags(irq_flags); 4543 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 4544 buffer = tr->trace_buffer.buffer; 4545 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 4546 irq_flags, preempt_count()); 4547 if (!event) { 4548 /* Ring buffer disabled, return as if not open for write */ 4549 written = -EBADF; 4550 goto out_unlock; 4551 } 4552 4553 entry = ring_buffer_event_data(event); 4554 entry->ip = _THIS_IP_; 4555 4556 if (nr_pages == 2) { 4557 len = PAGE_SIZE - offset; 4558 memcpy(&entry->buf, map_page[0] + offset, len); 4559 memcpy(&entry->buf[len], map_page[1], cnt - len); 4560 } else 4561 memcpy(&entry->buf, map_page[0] + offset, cnt); 4562 4563 if (entry->buf[cnt - 1] != '\n') { 4564 entry->buf[cnt] = '\n'; 4565 entry->buf[cnt + 1] = '\0'; 4566 } else 4567 entry->buf[cnt] = '\0'; 4568 4569 __buffer_unlock_commit(buffer, event); 4570 4571 written = cnt; 4572 4573 *fpos += written; 4574 4575 out_unlock: 4576 for (i = 0; i < nr_pages; i++){ 4577 kunmap_atomic(map_page[i]); 4578 put_page(pages[i]); 4579 } 4580 out: 4581 return written; 4582 } 4583 4584 static int tracing_clock_show(struct seq_file *m, void *v) 4585 { 4586 struct trace_array *tr = m->private; 4587 int i; 4588 4589 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 4590 seq_printf(m, 4591 "%s%s%s%s", i ? " " : "", 4592 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 4593 i == tr->clock_id ? "]" : ""); 4594 seq_putc(m, '\n'); 4595 4596 return 0; 4597 } 4598 4599 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4600 size_t cnt, loff_t *fpos) 4601 { 4602 struct seq_file *m = filp->private_data; 4603 struct trace_array *tr = m->private; 4604 char buf[64]; 4605 const char *clockstr; 4606 int i; 4607 4608 if (cnt >= sizeof(buf)) 4609 return -EINVAL; 4610 4611 if (copy_from_user(&buf, ubuf, cnt)) 4612 return -EFAULT; 4613 4614 buf[cnt] = 0; 4615 4616 clockstr = strstrip(buf); 4617 4618 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4619 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4620 break; 4621 } 4622 if (i == ARRAY_SIZE(trace_clocks)) 4623 return -EINVAL; 4624 4625 mutex_lock(&trace_types_lock); 4626 4627 tr->clock_id = i; 4628 4629 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func); 4630 4631 /* 4632 * New clock may not be consistent with the previous clock. 4633 * Reset the buffer so that it doesn't have incomparable timestamps. 
4634 */ 4635 tracing_reset_online_cpus(&tr->trace_buffer); 4636 4637 #ifdef CONFIG_TRACER_MAX_TRACE 4638 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) 4639 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 4640 tracing_reset_online_cpus(&tr->max_buffer); 4641 #endif 4642 4643 mutex_unlock(&trace_types_lock); 4644 4645 *fpos += cnt; 4646 4647 return cnt; 4648 } 4649 4650 static int tracing_clock_open(struct inode *inode, struct file *file) 4651 { 4652 struct trace_array *tr = inode->i_private; 4653 int ret; 4654 4655 if (tracing_disabled) 4656 return -ENODEV; 4657 4658 if (trace_array_get(tr)) 4659 return -ENODEV; 4660 4661 ret = single_open(file, tracing_clock_show, inode->i_private); 4662 if (ret < 0) 4663 trace_array_put(tr); 4664 4665 return ret; 4666 } 4667 4668 struct ftrace_buffer_info { 4669 struct trace_iterator iter; 4670 void *spare; 4671 unsigned int read; 4672 }; 4673 4674 #ifdef CONFIG_TRACER_SNAPSHOT 4675 static int tracing_snapshot_open(struct inode *inode, struct file *file) 4676 { 4677 struct trace_array *tr = inode->i_private; 4678 struct trace_iterator *iter; 4679 struct seq_file *m; 4680 int ret = 0; 4681 4682 if (trace_array_get(tr) < 0) 4683 return -ENODEV; 4684 4685 if (file->f_mode & FMODE_READ) { 4686 iter = __tracing_open(inode, file, true); 4687 if (IS_ERR(iter)) 4688 ret = PTR_ERR(iter); 4689 } else { 4690 /* Writes still need the seq_file to hold the private data */ 4691 ret = -ENOMEM; 4692 m = kzalloc(sizeof(*m), GFP_KERNEL); 4693 if (!m) 4694 goto out; 4695 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 4696 if (!iter) { 4697 kfree(m); 4698 goto out; 4699 } 4700 ret = 0; 4701 4702 iter->tr = tr; 4703 iter->trace_buffer = &tr->max_buffer; 4704 iter->cpu_file = tracing_get_cpu(inode); 4705 m->private = iter; 4706 file->private_data = m; 4707 } 4708 out: 4709 if (ret < 0) 4710 trace_array_put(tr); 4711 4712 return ret; 4713 } 4714 4715 static ssize_t 4716 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4717 loff_t *ppos) 4718 { 4719 struct seq_file *m = filp->private_data; 4720 struct trace_iterator *iter = m->private; 4721 struct trace_array *tr = iter->tr; 4722 unsigned long val; 4723 int ret; 4724 4725 ret = tracing_update_buffers(); 4726 if (ret < 0) 4727 return ret; 4728 4729 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4730 if (ret) 4731 return ret; 4732 4733 mutex_lock(&trace_types_lock); 4734 4735 if (tr->current_trace->use_max_tr) { 4736 ret = -EBUSY; 4737 goto out; 4738 } 4739 4740 switch (val) { 4741 case 0: 4742 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4743 ret = -EINVAL; 4744 break; 4745 } 4746 if (tr->allocated_snapshot) 4747 free_snapshot(tr); 4748 break; 4749 case 1: 4750 /* Only allow per-cpu swap if the ring buffer supports it */ 4751 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 4752 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4753 ret = -EINVAL; 4754 break; 4755 } 4756 #endif 4757 if (!tr->allocated_snapshot) { 4758 ret = alloc_snapshot(tr); 4759 if (ret < 0) 4760 break; 4761 } 4762 local_irq_disable(); 4763 /* Now, we're going to swap */ 4764 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4765 update_max_tr(tr, current, smp_processor_id()); 4766 else 4767 update_max_tr_single(tr, current, iter->cpu_file); 4768 local_irq_enable(); 4769 break; 4770 default: 4771 if (tr->allocated_snapshot) { 4772 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4773 tracing_reset_online_cpus(&tr->max_buffer); 4774 else 4775 tracing_reset(&tr->max_buffer, iter->cpu_file); 4776 } 4777 break; 4778 } 4779 
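/*
 * For illustration: the value written to the "snapshot" file selects the
 * action taken by the switch above -- 0 frees the snapshot buffer, 1
 * allocates it if needed and swaps it with the live buffer, and any
 * other value clears the snapshot contents.  A minimal userspace
 * sketch, assuming debugfs is mounted at the conventional
 * /sys/kernel/debug path:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int take_snapshot(void)
 *	{
 *		// writing "1" hits the allocate-and-swap case above
 *		int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "1", 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 * The frozen events can then be read back from the same file while the
 * live buffer keeps tracing.
 */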
4780 if (ret >= 0) { 4781 *ppos += cnt; 4782 ret = cnt; 4783 } 4784 out: 4785 mutex_unlock(&trace_types_lock); 4786 return ret; 4787 } 4788 4789 static int tracing_snapshot_release(struct inode *inode, struct file *file) 4790 { 4791 struct seq_file *m = file->private_data; 4792 int ret; 4793 4794 ret = tracing_release(inode, file); 4795 4796 if (file->f_mode & FMODE_READ) 4797 return ret; 4798 4799 /* If write only, the seq_file is just a stub */ 4800 if (m) 4801 kfree(m->private); 4802 kfree(m); 4803 4804 return 0; 4805 } 4806 4807 static int tracing_buffers_open(struct inode *inode, struct file *filp); 4808 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 4809 size_t count, loff_t *ppos); 4810 static int tracing_buffers_release(struct inode *inode, struct file *file); 4811 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 4812 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 4813 4814 static int snapshot_raw_open(struct inode *inode, struct file *filp) 4815 { 4816 struct ftrace_buffer_info *info; 4817 int ret; 4818 4819 ret = tracing_buffers_open(inode, filp); 4820 if (ret < 0) 4821 return ret; 4822 4823 info = filp->private_data; 4824 4825 if (info->iter.trace->use_max_tr) { 4826 tracing_buffers_release(inode, filp); 4827 return -EBUSY; 4828 } 4829 4830 info->iter.snapshot = true; 4831 info->iter.trace_buffer = &info->iter.tr->max_buffer; 4832 4833 return ret; 4834 } 4835 4836 #endif /* CONFIG_TRACER_SNAPSHOT */ 4837 4838 4839 static const struct file_operations tracing_max_lat_fops = { 4840 .open = tracing_open_generic, 4841 .read = tracing_max_lat_read, 4842 .write = tracing_max_lat_write, 4843 .llseek = generic_file_llseek, 4844 }; 4845 4846 static const struct file_operations set_tracer_fops = { 4847 .open = tracing_open_generic, 4848 .read = tracing_set_trace_read, 4849 .write = tracing_set_trace_write, 4850 .llseek = generic_file_llseek, 4851 }; 4852 4853 static const struct file_operations tracing_pipe_fops = { 4854 .open = tracing_open_pipe, 4855 .poll = tracing_poll_pipe, 4856 .read = tracing_read_pipe, 4857 .splice_read = tracing_splice_read_pipe, 4858 .release = tracing_release_pipe, 4859 .llseek = no_llseek, 4860 }; 4861 4862 static const struct file_operations tracing_entries_fops = { 4863 .open = tracing_open_generic_tr, 4864 .read = tracing_entries_read, 4865 .write = tracing_entries_write, 4866 .llseek = generic_file_llseek, 4867 .release = tracing_release_generic_tr, 4868 }; 4869 4870 static const struct file_operations tracing_total_entries_fops = { 4871 .open = tracing_open_generic_tr, 4872 .read = tracing_total_entries_read, 4873 .llseek = generic_file_llseek, 4874 .release = tracing_release_generic_tr, 4875 }; 4876 4877 static const struct file_operations tracing_free_buffer_fops = { 4878 .open = tracing_open_generic_tr, 4879 .write = tracing_free_buffer_write, 4880 .release = tracing_free_buffer_release, 4881 }; 4882 4883 static const struct file_operations tracing_mark_fops = { 4884 .open = tracing_open_generic_tr, 4885 .write = tracing_mark_write, 4886 .llseek = generic_file_llseek, 4887 .release = tracing_release_generic_tr, 4888 }; 4889 4890 static const struct file_operations trace_clock_fops = { 4891 .open = tracing_clock_open, 4892 .read = seq_read, 4893 .llseek = seq_lseek, 4894 .release = tracing_single_release_tr, 4895 .write = tracing_clock_write, 4896 }; 4897 4898 #ifdef CONFIG_TRACER_SNAPSHOT 4899 static const struct file_operations snapshot_fops = { 4900 .open = tracing_snapshot_open, 
4901 .read = seq_read, 4902 .write = tracing_snapshot_write, 4903 .llseek = tracing_seek, 4904 .release = tracing_snapshot_release, 4905 }; 4906 4907 static const struct file_operations snapshot_raw_fops = { 4908 .open = snapshot_raw_open, 4909 .read = tracing_buffers_read, 4910 .release = tracing_buffers_release, 4911 .splice_read = tracing_buffers_splice_read, 4912 .llseek = no_llseek, 4913 }; 4914 4915 #endif /* CONFIG_TRACER_SNAPSHOT */ 4916 4917 static int tracing_buffers_open(struct inode *inode, struct file *filp) 4918 { 4919 struct trace_array *tr = inode->i_private; 4920 struct ftrace_buffer_info *info; 4921 int ret; 4922 4923 if (tracing_disabled) 4924 return -ENODEV; 4925 4926 if (trace_array_get(tr) < 0) 4927 return -ENODEV; 4928 4929 info = kzalloc(sizeof(*info), GFP_KERNEL); 4930 if (!info) { 4931 trace_array_put(tr); 4932 return -ENOMEM; 4933 } 4934 4935 mutex_lock(&trace_types_lock); 4936 4937 info->iter.tr = tr; 4938 info->iter.cpu_file = tracing_get_cpu(inode); 4939 info->iter.trace = tr->current_trace; 4940 info->iter.trace_buffer = &tr->trace_buffer; 4941 info->spare = NULL; 4942 /* Force reading ring buffer for first read */ 4943 info->read = (unsigned int)-1; 4944 4945 filp->private_data = info; 4946 4947 mutex_unlock(&trace_types_lock); 4948 4949 ret = nonseekable_open(inode, filp); 4950 if (ret < 0) 4951 trace_array_put(tr); 4952 4953 return ret; 4954 } 4955 4956 static unsigned int 4957 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 4958 { 4959 struct ftrace_buffer_info *info = filp->private_data; 4960 struct trace_iterator *iter = &info->iter; 4961 4962 return trace_poll(iter, filp, poll_table); 4963 } 4964 4965 static ssize_t 4966 tracing_buffers_read(struct file *filp, char __user *ubuf, 4967 size_t count, loff_t *ppos) 4968 { 4969 struct ftrace_buffer_info *info = filp->private_data; 4970 struct trace_iterator *iter = &info->iter; 4971 ssize_t ret; 4972 ssize_t size; 4973 4974 if (!count) 4975 return 0; 4976 4977 mutex_lock(&trace_types_lock); 4978 4979 #ifdef CONFIG_TRACER_MAX_TRACE 4980 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 4981 size = -EBUSY; 4982 goto out_unlock; 4983 } 4984 #endif 4985 4986 if (!info->spare) 4987 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, 4988 iter->cpu_file); 4989 size = -ENOMEM; 4990 if (!info->spare) 4991 goto out_unlock; 4992 4993 /* Do we have previous read data to read? 
*/ 4994 if (info->read < PAGE_SIZE) 4995 goto read; 4996 4997 again: 4998 trace_access_lock(iter->cpu_file); 4999 ret = ring_buffer_read_page(iter->trace_buffer->buffer, 5000 &info->spare, 5001 count, 5002 iter->cpu_file, 0); 5003 trace_access_unlock(iter->cpu_file); 5004 5005 if (ret < 0) { 5006 if (trace_empty(iter)) { 5007 if ((filp->f_flags & O_NONBLOCK)) { 5008 size = -EAGAIN; 5009 goto out_unlock; 5010 } 5011 mutex_unlock(&trace_types_lock); 5012 iter->trace->wait_pipe(iter); 5013 mutex_lock(&trace_types_lock); 5014 if (signal_pending(current)) { 5015 size = -EINTR; 5016 goto out_unlock; 5017 } 5018 goto again; 5019 } 5020 size = 0; 5021 goto out_unlock; 5022 } 5023 5024 info->read = 0; 5025 read: 5026 size = PAGE_SIZE - info->read; 5027 if (size > count) 5028 size = count; 5029 5030 ret = copy_to_user(ubuf, info->spare + info->read, size); 5031 if (ret == size) { 5032 size = -EFAULT; 5033 goto out_unlock; 5034 } 5035 size -= ret; 5036 5037 *ppos += size; 5038 info->read += size; 5039 5040 out_unlock: 5041 mutex_unlock(&trace_types_lock); 5042 5043 return size; 5044 } 5045 5046 static int tracing_buffers_release(struct inode *inode, struct file *file) 5047 { 5048 struct ftrace_buffer_info *info = file->private_data; 5049 struct trace_iterator *iter = &info->iter; 5050 5051 mutex_lock(&trace_types_lock); 5052 5053 __trace_array_put(iter->tr); 5054 5055 if (info->spare) 5056 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare); 5057 kfree(info); 5058 5059 mutex_unlock(&trace_types_lock); 5060 5061 return 0; 5062 } 5063 5064 struct buffer_ref { 5065 struct ring_buffer *buffer; 5066 void *page; 5067 int ref; 5068 }; 5069 5070 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 5071 struct pipe_buffer *buf) 5072 { 5073 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5074 5075 if (--ref->ref) 5076 return; 5077 5078 ring_buffer_free_read_page(ref->buffer, ref->page); 5079 kfree(ref); 5080 buf->private = 0; 5081 } 5082 5083 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 5084 struct pipe_buffer *buf) 5085 { 5086 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 5087 5088 ref->ref++; 5089 } 5090 5091 /* Pipe buffer operations for a buffer. */ 5092 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 5093 .can_merge = 0, 5094 .map = generic_pipe_buf_map, 5095 .unmap = generic_pipe_buf_unmap, 5096 .confirm = generic_pipe_buf_confirm, 5097 .release = buffer_pipe_buf_release, 5098 .steal = generic_pipe_buf_steal, 5099 .get = buffer_pipe_buf_get, 5100 }; 5101 5102 /* 5103 * Callback from splice_to_pipe(), if we need to release some pages 5104 * at the end of the spd in case we error'ed out in filling the pipe. 
5105 */ 5106 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 5107 { 5108 struct buffer_ref *ref = 5109 (struct buffer_ref *)spd->partial[i].private; 5110 5111 if (--ref->ref) 5112 return; 5113 5114 ring_buffer_free_read_page(ref->buffer, ref->page); 5115 kfree(ref); 5116 spd->partial[i].private = 0; 5117 } 5118 5119 static ssize_t 5120 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 5121 struct pipe_inode_info *pipe, size_t len, 5122 unsigned int flags) 5123 { 5124 struct ftrace_buffer_info *info = file->private_data; 5125 struct trace_iterator *iter = &info->iter; 5126 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 5127 struct page *pages_def[PIPE_DEF_BUFFERS]; 5128 struct splice_pipe_desc spd = { 5129 .pages = pages_def, 5130 .partial = partial_def, 5131 .nr_pages_max = PIPE_DEF_BUFFERS, 5132 .flags = flags, 5133 .ops = &buffer_pipe_buf_ops, 5134 .spd_release = buffer_spd_release, 5135 }; 5136 struct buffer_ref *ref; 5137 int entries, size, i; 5138 ssize_t ret; 5139 5140 mutex_lock(&trace_types_lock); 5141 5142 #ifdef CONFIG_TRACER_MAX_TRACE 5143 if (iter->snapshot && iter->tr->current_trace->use_max_tr) { 5144 ret = -EBUSY; 5145 goto out; 5146 } 5147 #endif 5148 5149 if (splice_grow_spd(pipe, &spd)) { 5150 ret = -ENOMEM; 5151 goto out; 5152 } 5153 5154 if (*ppos & (PAGE_SIZE - 1)) { 5155 ret = -EINVAL; 5156 goto out; 5157 } 5158 5159 if (len & (PAGE_SIZE - 1)) { 5160 if (len < PAGE_SIZE) { 5161 ret = -EINVAL; 5162 goto out; 5163 } 5164 len &= PAGE_MASK; 5165 } 5166 5167 again: 5168 trace_access_lock(iter->cpu_file); 5169 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5170 5171 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { 5172 struct page *page; 5173 int r; 5174 5175 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 5176 if (!ref) 5177 break; 5178 5179 ref->ref = 1; 5180 ref->buffer = iter->trace_buffer->buffer; 5181 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 5182 if (!ref->page) { 5183 kfree(ref); 5184 break; 5185 } 5186 5187 r = ring_buffer_read_page(ref->buffer, &ref->page, 5188 len, iter->cpu_file, 1); 5189 if (r < 0) { 5190 ring_buffer_free_read_page(ref->buffer, ref->page); 5191 kfree(ref); 5192 break; 5193 } 5194 5195 /* 5196 * zero out any left over data, this is going to 5197 * user land. 5198 */ 5199 size = ring_buffer_page_len(ref->page); 5200 if (size < PAGE_SIZE) 5201 memset(ref->page + size, 0, PAGE_SIZE - size); 5202 5203 page = virt_to_page(ref->page); 5204 5205 spd.pages[i] = page; 5206 spd.partial[i].len = PAGE_SIZE; 5207 spd.partial[i].offset = 0; 5208 spd.partial[i].private = (unsigned long)ref; 5209 spd.nr_pages++; 5210 *ppos += PAGE_SIZE; 5211 5212 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 5213 } 5214 5215 trace_access_unlock(iter->cpu_file); 5216 spd.nr_pages = i; 5217 5218 /* did we read anything? 
*/ 5219 if (!spd.nr_pages) { 5220 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { 5221 ret = -EAGAIN; 5222 goto out; 5223 } 5224 mutex_unlock(&trace_types_lock); 5225 iter->trace->wait_pipe(iter); 5226 mutex_lock(&trace_types_lock); 5227 if (signal_pending(current)) { 5228 ret = -EINTR; 5229 goto out; 5230 } 5231 goto again; 5232 } 5233 5234 ret = splice_to_pipe(pipe, &spd); 5235 splice_shrink_spd(&spd); 5236 out: 5237 mutex_unlock(&trace_types_lock); 5238 5239 return ret; 5240 } 5241 5242 static const struct file_operations tracing_buffers_fops = { 5243 .open = tracing_buffers_open, 5244 .read = tracing_buffers_read, 5245 .poll = tracing_buffers_poll, 5246 .release = tracing_buffers_release, 5247 .splice_read = tracing_buffers_splice_read, 5248 .llseek = no_llseek, 5249 }; 5250 5251 static ssize_t 5252 tracing_stats_read(struct file *filp, char __user *ubuf, 5253 size_t count, loff_t *ppos) 5254 { 5255 struct inode *inode = file_inode(filp); 5256 struct trace_array *tr = inode->i_private; 5257 struct trace_buffer *trace_buf = &tr->trace_buffer; 5258 int cpu = tracing_get_cpu(inode); 5259 struct trace_seq *s; 5260 unsigned long cnt; 5261 unsigned long long t; 5262 unsigned long usec_rem; 5263 5264 s = kmalloc(sizeof(*s), GFP_KERNEL); 5265 if (!s) 5266 return -ENOMEM; 5267 5268 trace_seq_init(s); 5269 5270 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 5271 trace_seq_printf(s, "entries: %ld\n", cnt); 5272 5273 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 5274 trace_seq_printf(s, "overrun: %ld\n", cnt); 5275 5276 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 5277 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 5278 5279 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 5280 trace_seq_printf(s, "bytes: %ld\n", cnt); 5281 5282 if (trace_clocks[tr->clock_id].in_ns) { 5283 /* local or global for trace_clock */ 5284 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5285 usec_rem = do_div(t, USEC_PER_SEC); 5286 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 5287 t, usec_rem); 5288 5289 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5290 usec_rem = do_div(t, USEC_PER_SEC); 5291 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 5292 } else { 5293 /* counter or tsc mode for trace_clock */ 5294 trace_seq_printf(s, "oldest event ts: %llu\n", 5295 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 5296 5297 trace_seq_printf(s, "now ts: %llu\n", 5298 ring_buffer_time_stamp(trace_buf->buffer, cpu)); 5299 } 5300 5301 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 5302 trace_seq_printf(s, "dropped events: %ld\n", cnt); 5303 5304 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 5305 trace_seq_printf(s, "read events: %ld\n", cnt); 5306 5307 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 5308 5309 kfree(s); 5310 5311 return count; 5312 } 5313 5314 static const struct file_operations tracing_stats_fops = { 5315 .open = tracing_open_generic_tr, 5316 .read = tracing_stats_read, 5317 .llseek = generic_file_llseek, 5318 .release = tracing_release_generic_tr, 5319 }; 5320 5321 #ifdef CONFIG_DYNAMIC_FTRACE 5322 5323 int __weak ftrace_arch_read_dyn_info(char *buf, int size) 5324 { 5325 return 0; 5326 } 5327 5328 static ssize_t 5329 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 5330 size_t cnt, loff_t *ppos) 5331 { 5332 static char ftrace_dyn_info_buffer[1024]; 5333 static DEFINE_MUTEX(dyn_info_mutex); 5334 unsigned long *p = filp->private_data; 
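/*
 * For illustration: the per-CPU "stats" file generated by
 * tracing_stats_read() above prints one "name: value" pair per line
 * (entries, overrun, commit overrun, bytes, the oldest/now timestamps,
 * dropped events, read events).  A minimal userspace sketch that
 * extracts the overrun count for CPU 0, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	#include <stdio.h>
 *
 *	static long cpu0_overrun(void)
 *	{
 *		char line[128];
 *		long val = -1;
 *		FILE *f = fopen("/sys/kernel/debug/tracing/per_cpu/cpu0/stats", "r");
 *
 *		if (!f)
 *			return -1;
 *		while (fgets(line, sizeof(line), f)) {
 *			// "overrun:" counts events lost when the buffer wrapped
 *			if (sscanf(line, "overrun: %ld", &val) == 1)
 *				break;
 *		}
 *		fclose(f);
 *		return val;
 *	}
 */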
5335 char *buf = ftrace_dyn_info_buffer; 5336 int size = ARRAY_SIZE(ftrace_dyn_info_buffer); 5337 int r; 5338 5339 mutex_lock(&dyn_info_mutex); 5340 r = sprintf(buf, "%ld ", *p); 5341 5342 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); 5343 buf[r++] = '\n'; 5344 5345 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5346 5347 mutex_unlock(&dyn_info_mutex); 5348 5349 return r; 5350 } 5351 5352 static const struct file_operations tracing_dyn_info_fops = { 5353 .open = tracing_open_generic, 5354 .read = tracing_read_dyn_info, 5355 .llseek = generic_file_llseek, 5356 }; 5357 #endif /* CONFIG_DYNAMIC_FTRACE */ 5358 5359 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 5360 static void 5361 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5362 { 5363 tracing_snapshot(); 5364 } 5365 5366 static void 5367 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data) 5368 { 5369 unsigned long *count = (long *)data; 5370 5371 if (!*count) 5372 return; 5373 5374 if (*count != -1) 5375 (*count)--; 5376 5377 tracing_snapshot(); 5378 } 5379 5380 static int 5381 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 5382 struct ftrace_probe_ops *ops, void *data) 5383 { 5384 long count = (long)data; 5385 5386 seq_printf(m, "%ps:", (void *)ip); 5387 5388 seq_printf(m, "snapshot"); 5389 5390 if (count == -1) 5391 seq_printf(m, ":unlimited\n"); 5392 else 5393 seq_printf(m, ":count=%ld\n", count); 5394 5395 return 0; 5396 } 5397 5398 static struct ftrace_probe_ops snapshot_probe_ops = { 5399 .func = ftrace_snapshot, 5400 .print = ftrace_snapshot_print, 5401 }; 5402 5403 static struct ftrace_probe_ops snapshot_count_probe_ops = { 5404 .func = ftrace_count_snapshot, 5405 .print = ftrace_snapshot_print, 5406 }; 5407 5408 static int 5409 ftrace_trace_snapshot_callback(struct ftrace_hash *hash, 5410 char *glob, char *cmd, char *param, int enable) 5411 { 5412 struct ftrace_probe_ops *ops; 5413 void *count = (void *)-1; 5414 char *number; 5415 int ret; 5416 5417 /* hash funcs only work with set_ftrace_filter */ 5418 if (!enable) 5419 return -EINVAL; 5420 5421 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 5422 5423 if (glob[0] == '!') { 5424 unregister_ftrace_function_probe_func(glob+1, ops); 5425 return 0; 5426 } 5427 5428 if (!param) 5429 goto out_reg; 5430 5431 number = strsep(&param, ":"); 5432 5433 if (!strlen(number)) 5434 goto out_reg; 5435 5436 /* 5437 * We use the callback data field (which is a pointer) 5438 * as our counter. 5439 */ 5440 ret = kstrtoul(number, 0, (unsigned long *)&count); 5441 if (ret) 5442 return ret; 5443 5444 out_reg: 5445 ret = register_ftrace_function_probe(glob, ops, count); 5446 5447 if (ret >= 0) 5448 alloc_snapshot(&global_trace); 5449 5450 return ret < 0 ?
ret : 0; 5451 } 5452 5453 static struct ftrace_func_command ftrace_snapshot_cmd = { 5454 .name = "snapshot", 5455 .func = ftrace_trace_snapshot_callback, 5456 }; 5457 5458 static int register_snapshot_cmd(void) 5459 { 5460 return register_ftrace_command(&ftrace_snapshot_cmd); 5461 } 5462 #else 5463 static inline int register_snapshot_cmd(void) { return 0; } 5464 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 5465 5466 struct dentry *tracing_init_dentry_tr(struct trace_array *tr) 5467 { 5468 if (tr->dir) 5469 return tr->dir; 5470 5471 if (!debugfs_initialized()) 5472 return NULL; 5473 5474 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 5475 tr->dir = debugfs_create_dir("tracing", NULL); 5476 5477 if (!tr->dir) 5478 pr_warn_once("Could not create debugfs directory 'tracing'\n"); 5479 5480 return tr->dir; 5481 } 5482 5483 struct dentry *tracing_init_dentry(void) 5484 { 5485 return tracing_init_dentry_tr(&global_trace); 5486 } 5487 5488 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 5489 { 5490 struct dentry *d_tracer; 5491 5492 if (tr->percpu_dir) 5493 return tr->percpu_dir; 5494 5495 d_tracer = tracing_init_dentry_tr(tr); 5496 if (!d_tracer) 5497 return NULL; 5498 5499 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer); 5500 5501 WARN_ONCE(!tr->percpu_dir, 5502 "Could not create debugfs directory 'per_cpu/%d'\n", cpu); 5503 5504 return tr->percpu_dir; 5505 } 5506 5507 static struct dentry * 5508 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 5509 void *data, long cpu, const struct file_operations *fops) 5510 { 5511 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 5512 5513 if (ret) /* See tracing_get_cpu() */ 5514 ret->d_inode->i_cdev = (void *)(cpu + 1); 5515 return ret; 5516 } 5517 5518 static void 5519 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) 5520 { 5521 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 5522 struct dentry *d_cpu; 5523 char cpu_dir[30]; /* 30 characters should be more than enough */ 5524 5525 if (!d_percpu) 5526 return; 5527 5528 snprintf(cpu_dir, 30, "cpu%ld", cpu); 5529 d_cpu = debugfs_create_dir(cpu_dir, d_percpu); 5530 if (!d_cpu) { 5531 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); 5532 return; 5533 } 5534 5535 /* per cpu trace_pipe */ 5536 trace_create_cpu_file("trace_pipe", 0444, d_cpu, 5537 tr, cpu, &tracing_pipe_fops); 5538 5539 /* per cpu trace */ 5540 trace_create_cpu_file("trace", 0644, d_cpu, 5541 tr, cpu, &tracing_fops); 5542 5543 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, 5544 tr, cpu, &tracing_buffers_fops); 5545 5546 trace_create_cpu_file("stats", 0444, d_cpu, 5547 tr, cpu, &tracing_stats_fops); 5548 5549 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, 5550 tr, cpu, &tracing_entries_fops); 5551 5552 #ifdef CONFIG_TRACER_SNAPSHOT 5553 trace_create_cpu_file("snapshot", 0644, d_cpu, 5554 tr, cpu, &snapshot_fops); 5555 5556 trace_create_cpu_file("snapshot_raw", 0444, d_cpu, 5557 tr, cpu, &snapshot_raw_fops); 5558 #endif 5559 } 5560 5561 #ifdef CONFIG_FTRACE_SELFTEST 5562 /* Let selftest have access to static functions in this file */ 5563 #include "trace_selftest.c" 5564 #endif 5565 5566 struct trace_option_dentry { 5567 struct tracer_opt *opt; 5568 struct tracer_flags *flags; 5569 struct trace_array *tr; 5570 struct dentry *entry; 5571 }; 5572 5573 static ssize_t 5574 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 5575 loff_t *ppos) 5576 { 5577 struct 
trace_option_dentry *topt = filp->private_data; 5578 char *buf; 5579 5580 if (topt->flags->val & topt->opt->bit) 5581 buf = "1\n"; 5582 else 5583 buf = "0\n"; 5584 5585 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5586 } 5587 5588 static ssize_t 5589 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 5590 loff_t *ppos) 5591 { 5592 struct trace_option_dentry *topt = filp->private_data; 5593 unsigned long val; 5594 int ret; 5595 5596 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5597 if (ret) 5598 return ret; 5599 5600 if (val != 0 && val != 1) 5601 return -EINVAL; 5602 5603 if (!!(topt->flags->val & topt->opt->bit) != val) { 5604 mutex_lock(&trace_types_lock); 5605 ret = __set_tracer_option(topt->tr->current_trace, topt->flags, 5606 topt->opt, !val); 5607 mutex_unlock(&trace_types_lock); 5608 if (ret) 5609 return ret; 5610 } 5611 5612 *ppos += cnt; 5613 5614 return cnt; 5615 } 5616 5617 5618 static const struct file_operations trace_options_fops = { 5619 .open = tracing_open_generic, 5620 .read = trace_options_read, 5621 .write = trace_options_write, 5622 .llseek = generic_file_llseek, 5623 }; 5624 5625 static ssize_t 5626 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 5627 loff_t *ppos) 5628 { 5629 long index = (long)filp->private_data; 5630 char *buf; 5631 5632 if (trace_flags & (1 << index)) 5633 buf = "1\n"; 5634 else 5635 buf = "0\n"; 5636 5637 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 5638 } 5639 5640 static ssize_t 5641 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 5642 loff_t *ppos) 5643 { 5644 struct trace_array *tr = &global_trace; 5645 long index = (long)filp->private_data; 5646 unsigned long val; 5647 int ret; 5648 5649 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5650 if (ret) 5651 return ret; 5652 5653 if (val != 0 && val != 1) 5654 return -EINVAL; 5655 5656 mutex_lock(&trace_types_lock); 5657 ret = set_tracer_flag(tr, 1 << index, val); 5658 mutex_unlock(&trace_types_lock); 5659 5660 if (ret < 0) 5661 return ret; 5662 5663 *ppos += cnt; 5664 5665 return cnt; 5666 } 5667 5668 static const struct file_operations trace_options_core_fops = { 5669 .open = tracing_open_generic, 5670 .read = trace_options_core_read, 5671 .write = trace_options_core_write, 5672 .llseek = generic_file_llseek, 5673 }; 5674 5675 struct dentry *trace_create_file(const char *name, 5676 umode_t mode, 5677 struct dentry *parent, 5678 void *data, 5679 const struct file_operations *fops) 5680 { 5681 struct dentry *ret; 5682 5683 ret = debugfs_create_file(name, mode, parent, data, fops); 5684 if (!ret) 5685 pr_warning("Could not create debugfs '%s' entry\n", name); 5686 5687 return ret; 5688 } 5689 5690 5691 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 5692 { 5693 struct dentry *d_tracer; 5694 5695 if (tr->options) 5696 return tr->options; 5697 5698 d_tracer = tracing_init_dentry_tr(tr); 5699 if (!d_tracer) 5700 return NULL; 5701 5702 tr->options = debugfs_create_dir("options", d_tracer); 5703 if (!tr->options) { 5704 pr_warning("Could not create debugfs directory 'options'\n"); 5705 return NULL; 5706 } 5707 5708 return tr->options; 5709 } 5710 5711 static void 5712 create_trace_option_file(struct trace_array *tr, 5713 struct trace_option_dentry *topt, 5714 struct tracer_flags *flags, 5715 struct tracer_opt *opt) 5716 { 5717 struct dentry *t_options; 5718 5719 t_options = trace_options_init_dentry(tr); 5720 if (!t_options) 5721 return; 5722 5723 topt->flags = flags; 
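/*
 * For illustration: the flags/opts tables wired up here come from the
 * tracer itself.  A hypothetical tracer would feed
 * create_trace_option_files() a pair of arrays shaped like the sketch
 * below (the names are invented for the example; dummy_tracer_opt
 * earlier in this file is the empty case):
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		// each named entry becomes a 0/1 file under options/
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ } // terminator: create_trace_option_files() stops at a NULL name
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val = 0,		// all options start cleared
 *		.opts = my_tracer_opts,
 *	};
 *
 * A write of "1" to the resulting options/verbose file lands in
 * trace_options_write() above and flips the bit through
 * __set_tracer_option().
 */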
5724 topt->opt = opt; 5725 topt->tr = tr; 5726 5727 topt->entry = trace_create_file(opt->name, 0644, t_options, topt, 5728 &trace_options_fops); 5729 5730 } 5731 5732 static struct trace_option_dentry * 5733 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 5734 { 5735 struct trace_option_dentry *topts; 5736 struct tracer_flags *flags; 5737 struct tracer_opt *opts; 5738 int cnt; 5739 5740 if (!tracer) 5741 return NULL; 5742 5743 flags = tracer->flags; 5744 5745 if (!flags || !flags->opts) 5746 return NULL; 5747 5748 opts = flags->opts; 5749 5750 for (cnt = 0; opts[cnt].name; cnt++) 5751 ; 5752 5753 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 5754 if (!topts) 5755 return NULL; 5756 5757 for (cnt = 0; opts[cnt].name; cnt++) 5758 create_trace_option_file(tr, &topts[cnt], flags, 5759 &opts[cnt]); 5760 5761 return topts; 5762 } 5763 5764 static void 5765 destroy_trace_option_files(struct trace_option_dentry *topts) 5766 { 5767 int cnt; 5768 5769 if (!topts) 5770 return; 5771 5772 for (cnt = 0; topts[cnt].opt; cnt++) { 5773 if (topts[cnt].entry) 5774 debugfs_remove(topts[cnt].entry); 5775 } 5776 5777 kfree(topts); 5778 } 5779 5780 static struct dentry * 5781 create_trace_option_core_file(struct trace_array *tr, 5782 const char *option, long index) 5783 { 5784 struct dentry *t_options; 5785 5786 t_options = trace_options_init_dentry(tr); 5787 if (!t_options) 5788 return NULL; 5789 5790 return trace_create_file(option, 0644, t_options, (void *)index, 5791 &trace_options_core_fops); 5792 } 5793 5794 static __init void create_trace_options_dir(struct trace_array *tr) 5795 { 5796 struct dentry *t_options; 5797 int i; 5798 5799 t_options = trace_options_init_dentry(tr); 5800 if (!t_options) 5801 return; 5802 5803 for (i = 0; trace_options[i]; i++) 5804 create_trace_option_core_file(tr, trace_options[i], i); 5805 } 5806 5807 static ssize_t 5808 rb_simple_read(struct file *filp, char __user *ubuf, 5809 size_t cnt, loff_t *ppos) 5810 { 5811 struct trace_array *tr = filp->private_data; 5812 char buf[64]; 5813 int r; 5814 5815 r = tracer_tracing_is_on(tr); 5816 r = sprintf(buf, "%d\n", r); 5817 5818 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 5819 } 5820 5821 static ssize_t 5822 rb_simple_write(struct file *filp, const char __user *ubuf, 5823 size_t cnt, loff_t *ppos) 5824 { 5825 struct trace_array *tr = filp->private_data; 5826 struct ring_buffer *buffer = tr->trace_buffer.buffer; 5827 unsigned long val; 5828 int ret; 5829 5830 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 5831 if (ret) 5832 return ret; 5833 5834 if (buffer) { 5835 mutex_lock(&trace_types_lock); 5836 if (val) { 5837 tracer_tracing_on(tr); 5838 if (tr->current_trace->start) 5839 tr->current_trace->start(tr); 5840 } else { 5841 tracer_tracing_off(tr); 5842 if (tr->current_trace->stop) 5843 tr->current_trace->stop(tr); 5844 } 5845 mutex_unlock(&trace_types_lock); 5846 } 5847 5848 (*ppos)++; 5849 5850 return cnt; 5851 } 5852 5853 static const struct file_operations rb_simple_fops = { 5854 .open = tracing_open_generic_tr, 5855 .read = rb_simple_read, 5856 .write = rb_simple_write, 5857 .release = tracing_release_generic_tr, 5858 .llseek = default_llseek, 5859 }; 5860 5861 struct dentry *trace_instance_dir; 5862 5863 static void 5864 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); 5865 5866 static int 5867 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) 5868 { 5869 enum ring_buffer_flags rb_flags; 5870 5871 rb_flags = trace_flags & 
TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 5872 5873 buf->buffer = ring_buffer_alloc(size, rb_flags); 5874 if (!buf->buffer) 5875 return -ENOMEM; 5876 5877 buf->data = alloc_percpu(struct trace_array_cpu); 5878 if (!buf->data) { 5879 ring_buffer_free(buf->buffer); 5880 return -ENOMEM; 5881 } 5882 5883 /* Allocate the first page for all buffers */ 5884 set_buffer_entries(&tr->trace_buffer, 5885 ring_buffer_size(tr->trace_buffer.buffer, 0)); 5886 5887 return 0; 5888 } 5889 5890 static int allocate_trace_buffers(struct trace_array *tr, int size) 5891 { 5892 int ret; 5893 5894 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size); 5895 if (ret) 5896 return ret; 5897 5898 #ifdef CONFIG_TRACER_MAX_TRACE 5899 ret = allocate_trace_buffer(tr, &tr->max_buffer, 5900 allocate_snapshot ? size : 1); 5901 if (WARN_ON(ret)) { 5902 ring_buffer_free(tr->trace_buffer.buffer); 5903 free_percpu(tr->trace_buffer.data); 5904 return -ENOMEM; 5905 } 5906 tr->allocated_snapshot = allocate_snapshot; 5907 5908 /* 5909 * Only the top level trace array gets its snapshot allocated 5910 * from the kernel command line. 5911 */ 5912 allocate_snapshot = false; 5913 #endif 5914 return 0; 5915 } 5916 5917 static int new_instance_create(const char *name) 5918 { 5919 struct trace_array *tr; 5920 int ret; 5921 5922 mutex_lock(&trace_types_lock); 5923 5924 ret = -EEXIST; 5925 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5926 if (tr->name && strcmp(tr->name, name) == 0) 5927 goto out_unlock; 5928 } 5929 5930 ret = -ENOMEM; 5931 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 5932 if (!tr) 5933 goto out_unlock; 5934 5935 tr->name = kstrdup(name, GFP_KERNEL); 5936 if (!tr->name) 5937 goto out_free_tr; 5938 5939 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 5940 goto out_free_tr; 5941 5942 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 5943 5944 raw_spin_lock_init(&tr->start_lock); 5945 5946 tr->current_trace = &nop_trace; 5947 5948 INIT_LIST_HEAD(&tr->systems); 5949 INIT_LIST_HEAD(&tr->events); 5950 5951 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 5952 goto out_free_tr; 5953 5954 tr->dir = debugfs_create_dir(name, trace_instance_dir); 5955 if (!tr->dir) 5956 goto out_free_tr; 5957 5958 ret = event_trace_add_tracer(tr->dir, tr); 5959 if (ret) { 5960 debugfs_remove_recursive(tr->dir); 5961 goto out_free_tr; 5962 } 5963 5964 init_tracer_debugfs(tr, tr->dir); 5965 5966 list_add(&tr->list, &ftrace_trace_arrays); 5967 5968 mutex_unlock(&trace_types_lock); 5969 5970 return 0; 5971 5972 out_free_tr: 5973 if (tr->trace_buffer.buffer) 5974 ring_buffer_free(tr->trace_buffer.buffer); 5975 free_cpumask_var(tr->tracing_cpumask); 5976 kfree(tr->name); 5977 kfree(tr); 5978 5979 out_unlock: 5980 mutex_unlock(&trace_types_lock); 5981 5982 return ret; 5983 5984 } 5985 5986 static int instance_delete(const char *name) 5987 { 5988 struct trace_array *tr; 5989 int found = 0; 5990 int ret; 5991 5992 mutex_lock(&trace_types_lock); 5993 5994 ret = -ENODEV; 5995 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5996 if (tr->name && strcmp(tr->name, name) == 0) { 5997 found = 1; 5998 break; 5999 } 6000 } 6001 if (!found) 6002 goto out_unlock; 6003 6004 ret = -EBUSY; 6005 if (tr->ref) 6006 goto out_unlock; 6007 6008 list_del(&tr->list); 6009 6010 event_trace_del_tracer(tr); 6011 debugfs_remove_recursive(tr->dir); 6012 free_percpu(tr->trace_buffer.data); 6013 ring_buffer_free(tr->trace_buffer.buffer); 6014 6015 kfree(tr->name); 6016 kfree(tr); 6017 6018 ret = 0; 6019 6020 out_unlock: 6021 mutex_unlock(&trace_types_lock); 6022 6023 
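/*
 * For illustration: instances are driven from userspace with plain
 * mkdir()/rmdir() calls on the "instances" directory, which is what
 * ends up in new_instance_create() and instance_delete() here.  A
 * minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and
 * the name "foo" is unused:
 *
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// creates a trace_array with its own buffers and files
 *		if (mkdir("/sys/kernel/debug/tracing/instances/foo", 0755))
 *			perror("mkdir");
 *
 *		// ... use instances/foo/trace, instances/foo/trace_pipe ...
 *
 *		// tears it down again (fails with EBUSY if still in use)
 *		if (rmdir("/sys/kernel/debug/tracing/instances/foo"))
 *			perror("rmdir");
 *		return 0;
 *	}
 */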
return ret; 6024 } 6025 6026 static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode) 6027 { 6028 struct dentry *parent; 6029 int ret; 6030 6031 /* Paranoid: Make sure the parent is the "instances" directory */ 6032 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6033 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6034 return -ENOENT; 6035 6036 /* 6037 * The inode mutex is locked, but debugfs_create_dir() will also 6038 * take the mutex. As the instances directory can not be destroyed 6039 * or changed in any other way, it is safe to unlock it, and 6040 * let the dentry try. If two users try to make the same dir at 6041 * the same time, then the new_instance_create() will determine the 6042 * winner. 6043 */ 6044 mutex_unlock(&inode->i_mutex); 6045 6046 ret = new_instance_create(dentry->d_iname); 6047 6048 mutex_lock(&inode->i_mutex); 6049 6050 return ret; 6051 } 6052 6053 static int instance_rmdir(struct inode *inode, struct dentry *dentry) 6054 { 6055 struct dentry *parent; 6056 int ret; 6057 6058 /* Paranoid: Make sure the parent is the "instances" directory */ 6059 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); 6060 if (WARN_ON_ONCE(parent != trace_instance_dir)) 6061 return -ENOENT; 6062 6063 /* The caller did a dget() on dentry */ 6064 mutex_unlock(&dentry->d_inode->i_mutex); 6065 6066 /* 6067 * The inode mutex is locked, but debugfs_create_dir() will also 6068 * take the mutex. As the instances directory can not be destroyed 6069 * or changed in any other way, it is safe to unlock it, and 6070 * let the dentry try. If two users try to make the same dir at 6071 * the same time, then the instance_delete() will determine the 6072 * winner. 6073 */ 6074 mutex_unlock(&inode->i_mutex); 6075 6076 ret = instance_delete(dentry->d_iname); 6077 6078 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT); 6079 mutex_lock(&dentry->d_inode->i_mutex); 6080 6081 return ret; 6082 } 6083 6084 static const struct inode_operations instance_dir_inode_operations = { 6085 .lookup = simple_lookup, 6086 .mkdir = instance_mkdir, 6087 .rmdir = instance_rmdir, 6088 }; 6089 6090 static __init void create_trace_instances(struct dentry *d_tracer) 6091 { 6092 trace_instance_dir = debugfs_create_dir("instances", d_tracer); 6093 if (WARN_ON(!trace_instance_dir)) 6094 return; 6095 6096 /* Hijack the dir inode operations, to allow mkdir */ 6097 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations; 6098 } 6099 6100 static void 6101 init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) 6102 { 6103 int cpu; 6104 6105 trace_create_file("tracing_cpumask", 0644, d_tracer, 6106 tr, &tracing_cpumask_fops); 6107 6108 trace_create_file("trace_options", 0644, d_tracer, 6109 tr, &tracing_iter_fops); 6110 6111 trace_create_file("trace", 0644, d_tracer, 6112 tr, &tracing_fops); 6113 6114 trace_create_file("trace_pipe", 0444, d_tracer, 6115 tr, &tracing_pipe_fops); 6116 6117 trace_create_file("buffer_size_kb", 0644, d_tracer, 6118 tr, &tracing_entries_fops); 6119 6120 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 6121 tr, &tracing_total_entries_fops); 6122 6123 trace_create_file("free_buffer", 0200, d_tracer, 6124 tr, &tracing_free_buffer_fops); 6125 6126 trace_create_file("trace_marker", 0220, d_tracer, 6127 tr, &tracing_mark_fops); 6128 6129 trace_create_file("trace_clock", 0644, d_tracer, tr, 6130 &trace_clock_fops); 6131 6132 trace_create_file("tracing_on", 0644, d_tracer, 6133 tr, &rb_simple_fops); 6134 6135 #ifdef 
CONFIG_TRACER_SNAPSHOT 6136 trace_create_file("snapshot", 0644, d_tracer, 6137 tr, &snapshot_fops); 6138 #endif 6139 6140 for_each_tracing_cpu(cpu) 6141 tracing_init_debugfs_percpu(tr, cpu); 6142 6143 } 6144 6145 static __init int tracer_init_debugfs(void) 6146 { 6147 struct dentry *d_tracer; 6148 6149 trace_access_lock_init(); 6150 6151 d_tracer = tracing_init_dentry(); 6152 if (!d_tracer) 6153 return 0; 6154 6155 init_tracer_debugfs(&global_trace, d_tracer); 6156 6157 trace_create_file("available_tracers", 0444, d_tracer, 6158 &global_trace, &show_traces_fops); 6159 6160 trace_create_file("current_tracer", 0644, d_tracer, 6161 &global_trace, &set_tracer_fops); 6162 6163 #ifdef CONFIG_TRACER_MAX_TRACE 6164 trace_create_file("tracing_max_latency", 0644, d_tracer, 6165 &tracing_max_latency, &tracing_max_lat_fops); 6166 #endif 6167 6168 trace_create_file("tracing_thresh", 0644, d_tracer, 6169 &tracing_thresh, &tracing_max_lat_fops); 6170 6171 trace_create_file("README", 0444, d_tracer, 6172 NULL, &tracing_readme_fops); 6173 6174 trace_create_file("saved_cmdlines", 0444, d_tracer, 6175 NULL, &tracing_saved_cmdlines_fops); 6176 6177 #ifdef CONFIG_DYNAMIC_FTRACE 6178 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 6179 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 6180 #endif 6181 6182 create_trace_instances(d_tracer); 6183 6184 create_trace_options_dir(&global_trace); 6185 6186 return 0; 6187 } 6188 6189 static int trace_panic_handler(struct notifier_block *this, 6190 unsigned long event, void *unused) 6191 { 6192 if (ftrace_dump_on_oops) 6193 ftrace_dump(ftrace_dump_on_oops); 6194 return NOTIFY_OK; 6195 } 6196 6197 static struct notifier_block trace_panic_notifier = { 6198 .notifier_call = trace_panic_handler, 6199 .next = NULL, 6200 .priority = 150 /* priority: INT_MAX >= x >= 0 */ 6201 }; 6202 6203 static int trace_die_handler(struct notifier_block *self, 6204 unsigned long val, 6205 void *data) 6206 { 6207 switch (val) { 6208 case DIE_OOPS: 6209 if (ftrace_dump_on_oops) 6210 ftrace_dump(ftrace_dump_on_oops); 6211 break; 6212 default: 6213 break; 6214 } 6215 return NOTIFY_OK; 6216 } 6217 6218 static struct notifier_block trace_die_notifier = { 6219 .notifier_call = trace_die_handler, 6220 .priority = 200 6221 }; 6222 6223 /* 6224 * printk is set to max of 1024, we really don't need it that big. 6225 * Nothing should be printing 1000 characters anyway. 6226 */ 6227 #define TRACE_MAX_PRINT 1000 6228 6229 /* 6230 * Define here KERN_TRACE so that we have one place to modify 6231 * it if we decide to change what log level the ftrace dump 6232 * should be at. 6233 */ 6234 #define KERN_TRACE KERN_EMERG 6235 6236 void 6237 trace_printk_seq(struct trace_seq *s) 6238 { 6239 /* Probably should print a warning here. */ 6240 if (s->len >= TRACE_MAX_PRINT) 6241 s->len = TRACE_MAX_PRINT; 6242 6243 /* should be zero ended, but we are paranoid. 
*/ 6244 s->buffer[s->len] = 0; 6245 6246 printk(KERN_TRACE "%s", s->buffer); 6247 6248 trace_seq_init(s); 6249 } 6250 6251 void trace_init_global_iter(struct trace_iterator *iter) 6252 { 6253 iter->tr = &global_trace; 6254 iter->trace = iter->tr->current_trace; 6255 iter->cpu_file = RING_BUFFER_ALL_CPUS; 6256 iter->trace_buffer = &global_trace.trace_buffer; 6257 } 6258 6259 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) 6260 { 6261 /* use static because iter can be a bit big for the stack */ 6262 static struct trace_iterator iter; 6263 static atomic_t dump_running; 6264 unsigned int old_userobj; 6265 unsigned long flags; 6266 int cnt = 0, cpu; 6267 6268 /* Only allow one dump user at a time. */ 6269 if (atomic_inc_return(&dump_running) != 1) { 6270 atomic_dec(&dump_running); 6271 return; 6272 } 6273 6274 /* 6275 * Always turn off tracing when we dump. 6276 * We don't need to show trace output of what happens 6277 * between multiple crashes. 6278 * 6279 * If the user does a sysrq-z, then they can re-enable 6280 * tracing with echo 1 > tracing_on. 6281 */ 6282 tracing_off(); 6283 6284 local_irq_save(flags); 6285 6286 /* Simulate the iterator */ 6287 trace_init_global_iter(&iter); 6288 6289 for_each_tracing_cpu(cpu) { 6290 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled); 6291 } 6292 6293 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; 6294 6295 /* don't look at user memory in panic mode */ 6296 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 6297 6298 switch (oops_dump_mode) { 6299 case DUMP_ALL: 6300 iter.cpu_file = RING_BUFFER_ALL_CPUS; 6301 break; 6302 case DUMP_ORIG: 6303 iter.cpu_file = raw_smp_processor_id(); 6304 break; 6305 case DUMP_NONE: 6306 goto out_enable; 6307 default: 6308 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); 6309 iter.cpu_file = RING_BUFFER_ALL_CPUS; 6310 } 6311 6312 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 6313 6314 /* Did function tracer already get disabled? */ 6315 if (ftrace_is_dead()) { 6316 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 6317 printk("# MAY BE MISSING FUNCTION EVENTS\n"); 6318 } 6319 6320 /* 6321 * We need to stop all tracing on all CPUs to read the 6322 * next buffer. This is a bit expensive, but it is 6323 * not done often. We fill in what we can read, 6324 * and then release the locks again.
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read the
	 * next buffer. This is a bit expensive, but is not done
	 * often. We print everything we can read, and then
	 * re-enable the buffers.
	 */
	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
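/*
 * Illustrative sketch, not part of the original file: ftrace_dump() is
 * exported, so a module chasing a rare failure can flush the trace
 * buffer straight to the console the moment the failure is detected.
 * The helper name and the error check are hypothetical. Note that
 * ftrace_dump() calls tracing_off(), so tracing stays disabled until
 * someone writes 1 to the tracing_on file.
 */
static void __maybe_unused example_dump_on_error(int err)
{
	if (!err)
		return;

	/* Dump only the buffer of the CPU that hit the error */
	ftrace_dump(DUMP_ORIG);
}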
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	trace_init_cmdlines();

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	/*
	 * Apply any "trace_options=" boot options, one comma-separated
	 * entry at a time (a standalone sketch of this strsep() pattern
	 * follows the initcalls below).
	 */
	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_cpumask:
	free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE
	free_percpu(global_trace.max_buffer.data);
#endif
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section
	 * and this function runs as a late_initcall. If the boot
	 * tracer was never registered, clear the pointer out, to
	 * prevent a later registration from accessing a buffer
	 * that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
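/*
 * Illustrative sketch, not part of the original file: a standalone
 * version of the strsep() pattern that tracer_alloc_buffers() uses to
 * apply "trace_options=" boot parameters. The helper name is
 * hypothetical; strsep() consumes the string in place and hands back
 * one comma-separated option per call.
 */
static void __init __maybe_unused apply_boot_option_list(char *options)
{
	char *option;

	while ((option = strsep(&options, ",")) != NULL)
		trace_set_options(&global_trace, option);
}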