/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * When a reader is waiting for data, then this variable is
 * set to true.
 */
static bool trace_wakeup_needed;

static struct irq_work trace_work_wakeup;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default; enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 *   Set 1 if you want to dump buffers of all CPUs
 *   Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

/* Round to the nearest microsecond: 1499 ns -> 1 us, 1500 ns -> 2 us. */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly = &nop_trace;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another
 * process to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * per-cpu ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
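/*
 * Illustrative sketch, not part of the original file: the expected
 * reader-side pattern for the primitives above -- take the access lock
 * for one cpu (or TRACE_PIPE_ALL_CPU), touch that buffer, drop the lock.
 * The function name is hypothetical; the trace_pipe read path does
 * essentially this.
 */
#if 0
static void example_consume_cpu(struct trace_iterator *iter, int cpu)
{
	unsigned long lost_events;
	u64 ts;

	trace_access_lock(cpu);
	ring_buffer_consume(iter->tr->buffer, cpu, &ts, &lost_events);
	trace_access_unlock(cpu);
}
#endif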
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;

static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Wakes up any task that is blocked on the trace_wait queue.
 * This is used with trace_poll for tasks polling the trace.
 */
static void trace_wake_up(struct irq_work *work)
{
	wake_up_all(&trace_wait);
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.buffer)
		ring_buffer_record_on(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.buffer)
		ring_buffer_record_off(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * off the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);
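/*
 * Illustrative sketch, not part of the original file: since tracing_off()
 * only stops recording, a common debugging pattern is to freeze the
 * buffers the moment a bad condition is detected, preserving the events
 * that led up to it. The function name is hypothetical.
 */
#if 0
static void example_freeze_on_error(int err)
{
	if (err) {
		tracing_off();
		pr_warn("tracing stopped: err=%d\n", err);
	}
}
#endif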
/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.buffer)
		return ring_buffer_record_is_on(global_trace.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	ARCH_TRACE_CLOCKS
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
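/*
 * Illustrative sketch, not part of the original file: the calling pattern
 * the parser is designed for -- a debugfs write handler that wants one
 * space-separated token per call. The handler name is hypothetical;
 * ftrace's filter write paths follow this shape.
 */
#if 0
static ssize_t example_token_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
	}

	trace_parser_put(&parser);
	return read;
}
#endif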
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of
 * CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure.
 * (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!current_trace->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
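/*
 * Illustrative sketch, not part of the original file: the pattern a
 * latency tracer (e.g. irqsoff) uses around update_max_tr() when it
 * observes a new worst-case latency. The function name is hypothetical.
 */
#if 0
static void example_report_latency(struct trace_array *tr,
				   struct task_struct *tsk,
				   unsigned long latency, int cpu)
{
	if (latency <= tracing_max_latency)
		return;

	tracing_max_latency = latency;
	update_max_tr(tr, tsk, cpu);
}
#endif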
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
		return;

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. Spurious wake ups are OK.
	 */
	trace_wakeup_needed = true;

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;

		if (type->use_max_tr) {
			/* If we expanded the buffers, make sure the max is expanded too */
			if (ring_buffer_expanded)
				ring_buffer_resize(max_tr.buffer, trace_buf_size,
						   RING_BUFFER_ALL_CPUS);
			type->allocated_snapshot = true;
		}

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			/* Add the warning after printing 'FAILED' */
			WARN_ON(1);
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		if (type->use_max_tr) {
			type->allocated_snapshot = false;

			/* Shrink the max buffer again */
			if (ring_buffer_expanded)
				ring_buffer_resize(max_tr.buffer, 1,
						   RING_BUFFER_ALL_CPUS);
		}

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

out_unlock:
	return ret;
}
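/*
 * Illustrative sketch, not part of the original file: the minimal shape
 * of a tracer plugin that register_tracer() accepts. All names here are
 * hypothetical; real tracers (nop, function, irqsoff, ...) live in their
 * own files under kernel/trace/.
 */
#if 0
static int example_tracer_init(struct trace_array *tr)
{
	return 0;	/* start tracing here */
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* stop tracing and clean up here */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(example_tracer_register);
#endif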
void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}
int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
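/*
 * Illustrative sketch, not part of the original file: trace_stop_count
 * makes tracing_stop()/tracing_start() nest, so a caller can bracket a
 * region that must not be traced over and still compose with other
 * stoppers. The function name is hypothetical.
 */
#if 0
static void example_quiesce_buffers(void)
{
	tracing_stop();
	/* the ring buffers refuse new events here */
	tracing_start();
}
#endif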
void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	if (trace_wakeup_needed) {
		trace_wakeup_needed = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&trace_work_wakeup);
	}
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_ENTRIES.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 traces, seems to get us at the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.buffer)
		tracing_start_cmdline_record();
}
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}

/**
 * trace_vbprintk - write binary msg to tracing buffer
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
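/*
 * Illustrative sketch, not part of the original file: trace_vbprintk()
 * is the va_list backend; a varargs front end looks like the (real)
 * trace_array_printk() just below. The wrapper name here is
 * hypothetical.
 */
#if 0
static __printf(2, 3) int example_bprintk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_vbprintk(ip, fmt, ap);
	va_end(ap);
	return ret;
}
#endif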
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = trace_array_vprintk(tr, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
	if (len > TRACE_BUF_SIZE)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->tr->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek directly.
	 */
	if (cpu_file > TRACE_PIPE_ALL_CPU) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}
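/*
 * Illustrative sketch, not part of the original file: a reader that wants
 * all remaining events in global timestamp order just pumps the iterator;
 * the seq_file path below is built from exactly this loop. The function
 * name is hypothetical.
 */
#if 0
static void example_drain(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter))
		print_trace_line(iter);
}
#endif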
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct trace_array *tr = iter->tr;
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	tr->data[cpu]->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->tr->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	tr->data[cpu]->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(current_trace && iter->trace->name != current_trace->name))
		*iter->trace = *current_trace;
	mutex_unlock(&trace_types_lock);

	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == TRACE_PIPE_ALL_CPU) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	if (iter->snapshot && iter->trace->use_max_tr)
		return;

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
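/*
 * Illustrative sketch, not part of the original file: s_start(), s_next(),
 * s_stop() and s_show() are the four halves of a seq_file interface; this
 * file wires them up in a seq_operations table further down, essentially
 * like this (the table name here is hypothetical).
 */
#if 0
static const struct seq_operations example_trace_seq_ops = {
	.start	= s_start,
	.next	= s_next,
	.stop	= s_stop,
	.show	= s_show,
};
#endif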
static void
get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(tr->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (tr->data[cpu]->skipped_entries) {
			count -= tr->data[cpu]->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(tr->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n");
	seq_puts(m, "#                 / _-----=> irqs-off        \n");
	seq_puts(m, "#                | / _----=> need-resched    \n");
	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#                |||| /     delay            \n");
	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
}

static void print_event_info(struct trace_array *tr, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(tr, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
	print_event_info(tr, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
{
	print_event_info(tr, m);
	seq_puts(m, "#                              _-----=> irqs-off\n");
	seq_puts(m, "#                             / _----=> need-resched\n");
	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                            || / _--=> preempt-depth\n");
	seq_puts(m, "#                            ||| /     delay\n");
	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(tr, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 2148 trace_print_seq(m, &iter->seq); 2149 seq_puts(m, "\n#\n"); 2150 } 2151 2152 seq_puts(m, "#\n"); 2153 } 2154 2155 static void test_cpu_buff_start(struct trace_iterator *iter) 2156 { 2157 struct trace_seq *s = &iter->seq; 2158 2159 if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2160 return; 2161 2162 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2163 return; 2164 2165 if (cpumask_test_cpu(iter->cpu, iter->started)) 2166 return; 2167 2168 if (iter->tr->data[iter->cpu]->skipped_entries) 2169 return; 2170 2171 cpumask_set_cpu(iter->cpu, iter->started); 2172 2173 /* Don't print started cpu buffer for the first entry of the trace */ 2174 if (iter->idx > 1) 2175 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 2176 iter->cpu); 2177 } 2178 2179 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2180 { 2181 struct trace_seq *s = &iter->seq; 2182 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2183 struct trace_entry *entry; 2184 struct trace_event *event; 2185 2186 entry = iter->ent; 2187 2188 test_cpu_buff_start(iter); 2189 2190 event = ftrace_find_event(entry->type); 2191 2192 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2193 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2194 if (!trace_print_lat_context(iter)) 2195 goto partial; 2196 } else { 2197 if (!trace_print_context(iter)) 2198 goto partial; 2199 } 2200 } 2201 2202 if (event) 2203 return event->funcs->trace(iter, sym_flags, event); 2204 2205 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 2206 goto partial; 2207 2208 return TRACE_TYPE_HANDLED; 2209 partial: 2210 return TRACE_TYPE_PARTIAL_LINE; 2211 } 2212 2213 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2214 { 2215 struct trace_seq *s = &iter->seq; 2216 struct trace_entry *entry; 2217 struct trace_event *event; 2218 2219 entry = iter->ent; 2220 2221 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2222 if (!trace_seq_printf(s, "%d %d %llu ", 2223 entry->pid, iter->cpu, iter->ts)) 2224 goto partial; 2225 } 2226 2227 event = ftrace_find_event(entry->type); 2228 if (event) 2229 return event->funcs->raw(iter, 0, event); 2230 2231 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 2232 goto partial; 2233 2234 return TRACE_TYPE_HANDLED; 2235 partial: 2236 return TRACE_TYPE_PARTIAL_LINE; 2237 } 2238 2239 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2240 { 2241 struct trace_seq *s = &iter->seq; 2242 unsigned char newline = '\n'; 2243 struct trace_entry *entry; 2244 struct trace_event *event; 2245 2246 entry = iter->ent; 2247 2248 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2249 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 2250 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 2251 SEQ_PUT_HEX_FIELD_RET(s, iter->ts); 2252 } 2253 2254 event = ftrace_find_event(entry->type); 2255 if (event) { 2256 enum print_line_t ret = event->funcs->hex(iter, 0, event); 2257 if (ret != TRACE_TYPE_HANDLED) 2258 return ret; 2259 } 2260 2261 SEQ_PUT_FIELD_RET(s, newline); 2262 2263 return TRACE_TYPE_HANDLED; 2264 } 2265 2266 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2267 { 2268 struct trace_seq *s = &iter->seq; 2269 struct trace_entry *entry; 2270 struct trace_event *event; 2271 2272 entry = iter->ent; 2273 2274 if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2275 SEQ_PUT_FIELD_RET(s, entry->pid); 2276 SEQ_PUT_FIELD_RET(s, iter->cpu); 2277 SEQ_PUT_FIELD_RET(s, iter->ts); 2278 } 2279 2280 event = ftrace_find_event(entry->type); 2281 return event ? 
event->funcs->binary(iter, 0, event) : 2282 TRACE_TYPE_HANDLED; 2283 } 2284 2285 int trace_empty(struct trace_iterator *iter) 2286 { 2287 struct ring_buffer_iter *buf_iter; 2288 int cpu; 2289 2290 /* If we are looking at one CPU buffer, only check that one */ 2291 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { 2292 cpu = iter->cpu_file; 2293 buf_iter = trace_buffer_iter(iter, cpu); 2294 if (buf_iter) { 2295 if (!ring_buffer_iter_empty(buf_iter)) 2296 return 0; 2297 } else { 2298 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) 2299 return 0; 2300 } 2301 return 1; 2302 } 2303 2304 for_each_tracing_cpu(cpu) { 2305 buf_iter = trace_buffer_iter(iter, cpu); 2306 if (buf_iter) { 2307 if (!ring_buffer_iter_empty(buf_iter)) 2308 return 0; 2309 } else { 2310 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) 2311 return 0; 2312 } 2313 } 2314 2315 return 1; 2316 } 2317 2318 /* Called with trace_event_read_lock() held. */ 2319 enum print_line_t print_trace_line(struct trace_iterator *iter) 2320 { 2321 enum print_line_t ret; 2322 2323 if (iter->lost_events && 2324 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 2325 iter->cpu, iter->lost_events)) 2326 return TRACE_TYPE_PARTIAL_LINE; 2327 2328 if (iter->trace && iter->trace->print_line) { 2329 ret = iter->trace->print_line(iter); 2330 if (ret != TRACE_TYPE_UNHANDLED) 2331 return ret; 2332 } 2333 2334 if (iter->ent->type == TRACE_BPRINT && 2335 trace_flags & TRACE_ITER_PRINTK && 2336 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2337 return trace_print_bprintk_msg_only(iter); 2338 2339 if (iter->ent->type == TRACE_PRINT && 2340 trace_flags & TRACE_ITER_PRINTK && 2341 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 2342 return trace_print_printk_msg_only(iter); 2343 2344 if (trace_flags & TRACE_ITER_BIN) 2345 return print_bin_fmt(iter); 2346 2347 if (trace_flags & TRACE_ITER_HEX) 2348 return print_hex_fmt(iter); 2349 2350 if (trace_flags & TRACE_ITER_RAW) 2351 return print_raw_fmt(iter); 2352 2353 return print_trace_fmt(iter); 2354 } 2355 2356 void trace_latency_header(struct seq_file *m) 2357 { 2358 struct trace_iterator *iter = m->private; 2359 2360 /* print nothing if the buffers are empty */ 2361 if (trace_empty(iter)) 2362 return; 2363 2364 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 2365 print_trace_header(m, iter); 2366 2367 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2368 print_lat_help_header(m); 2369 } 2370 2371 void trace_default_header(struct seq_file *m) 2372 { 2373 struct trace_iterator *iter = m->private; 2374 2375 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 2376 return; 2377 2378 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 2379 /* print nothing if the buffers are empty */ 2380 if (trace_empty(iter)) 2381 return; 2382 print_trace_header(m, iter); 2383 if (!(trace_flags & TRACE_ITER_VERBOSE)) 2384 print_lat_help_header(m); 2385 } else { 2386 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 2387 if (trace_flags & TRACE_ITER_IRQ_INFO) 2388 print_func_help_header_irq(iter->tr, m); 2389 else 2390 print_func_help_header(iter->tr, m); 2391 } 2392 } 2393 } 2394 2395 static void test_ftrace_alive(struct seq_file *m) 2396 { 2397 if (!ftrace_is_dead()) 2398 return; 2399 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 2400 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); 2401 } 2402 2403 static int s_show(struct seq_file *m, void *v) 2404 { 2405 struct trace_iterator *iter = v; 2406 int ret; 2407 2408 if (iter->ent == NULL) { 2409 if (iter->tr) { 2410 seq_printf(m, "# tracer: %s\n", iter->trace->name); 2411 seq_puts(m, "#\n"); 2412 
test_ftrace_alive(m); 2413 } 2414 if (iter->trace && iter->trace->print_header) 2415 iter->trace->print_header(m); 2416 else 2417 trace_default_header(m); 2418 2419 } else if (iter->leftover) { 2420 /* 2421 * If we filled the seq_file buffer earlier, we 2422 * want to just show it now. 2423 */ 2424 ret = trace_print_seq(m, &iter->seq); 2425 2426 /* ret should this time be zero, but you never know */ 2427 iter->leftover = ret; 2428 2429 } else { 2430 print_trace_line(iter); 2431 ret = trace_print_seq(m, &iter->seq); 2432 /* 2433 * If we overflow the seq_file buffer, then it will 2434 * ask us for this data again at start up. 2435 * Use that instead. 2436 * ret is 0 if seq_file write succeeded. 2437 * -1 otherwise. 2438 */ 2439 iter->leftover = ret; 2440 } 2441 2442 return 0; 2443 } 2444 2445 static const struct seq_operations tracer_seq_ops = { 2446 .start = s_start, 2447 .next = s_next, 2448 .stop = s_stop, 2449 .show = s_show, 2450 }; 2451 2452 static struct trace_iterator * 2453 __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2454 { 2455 long cpu_file = (long) inode->i_private; 2456 struct trace_iterator *iter; 2457 int cpu; 2458 2459 if (tracing_disabled) 2460 return ERR_PTR(-ENODEV); 2461 2462 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); 2463 if (!iter) 2464 return ERR_PTR(-ENOMEM); 2465 2466 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(), 2467 GFP_KERNEL); 2468 if (!iter->buffer_iter) 2469 goto release; 2470 2471 /* 2472 * We make a copy of the current tracer to avoid concurrent 2473 * changes on it while we are reading. 2474 */ 2475 mutex_lock(&trace_types_lock); 2476 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); 2477 if (!iter->trace) 2478 goto fail; 2479 2480 *iter->trace = *current_trace; 2481 2482 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2483 goto fail; 2484 2485 if (current_trace->print_max || snapshot) 2486 iter->tr = &max_tr; 2487 else 2488 iter->tr = &global_trace; 2489 iter->snapshot = snapshot; 2490 iter->pos = -1; 2491 mutex_init(&iter->mutex); 2492 iter->cpu_file = cpu_file; 2493 2494 /* Notify the tracer early; before we stop tracing. */ 2495 if (iter->trace && iter->trace->open) 2496 iter->trace->open(iter); 2497 2498 /* Annotate start of buffers if we had overruns */ 2499 if (ring_buffer_overruns(iter->tr->buffer)) 2500 iter->iter_flags |= TRACE_FILE_ANNOTATE; 2501 2502 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ 2503 if (trace_clocks[trace_clock_id].in_ns) 2504 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2505 2506 /* stop the trace while dumping if we are not opening "snapshot" */ 2507 if (!iter->snapshot) 2508 tracing_stop(); 2509 2510 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2511 for_each_tracing_cpu(cpu) { 2512 iter->buffer_iter[cpu] = 2513 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2514 } 2515 ring_buffer_read_prepare_sync(); 2516 for_each_tracing_cpu(cpu) { 2517 ring_buffer_read_start(iter->buffer_iter[cpu]); 2518 tracing_iter_reset(iter, cpu); 2519 } 2520 } else { 2521 cpu = iter->cpu_file; 2522 iter->buffer_iter[cpu] = 2523 ring_buffer_read_prepare(iter->tr->buffer, cpu); 2524 ring_buffer_read_prepare_sync(); 2525 ring_buffer_read_start(iter->buffer_iter[cpu]); 2526 tracing_iter_reset(iter, cpu); 2527 } 2528 2529 mutex_unlock(&trace_types_lock); 2530 2531 return iter; 2532 2533 fail: 2534 mutex_unlock(&trace_types_lock); 2535 kfree(iter->trace); 2536 kfree(iter->buffer_iter); 2537 release: 2538 seq_release_private(inode, file); 2539 return ERR_PTR(-ENOMEM); 2540 } 2541 2542 int tracing_open_generic(struct inode *inode, struct file *filp) 2543 { 2544 if (tracing_disabled) 2545 return -ENODEV; 2546 2547 filp->private_data = inode->i_private; 2548 return 0; 2549 } 2550 2551 static int tracing_release(struct inode *inode, struct file *file) 2552 { 2553 struct seq_file *m = file->private_data; 2554 struct trace_iterator *iter; 2555 int cpu; 2556 2557 if (!(file->f_mode & FMODE_READ)) 2558 return 0; 2559 2560 iter = m->private; 2561 2562 mutex_lock(&trace_types_lock); 2563 for_each_tracing_cpu(cpu) { 2564 if (iter->buffer_iter[cpu]) 2565 ring_buffer_read_finish(iter->buffer_iter[cpu]); 2566 } 2567 2568 if (iter->trace && iter->trace->close) 2569 iter->trace->close(iter); 2570 2571 if (!iter->snapshot) 2572 /* reenable tracing if it was previously enabled */ 2573 tracing_start(); 2574 mutex_unlock(&trace_types_lock); 2575 2576 mutex_destroy(&iter->mutex); 2577 free_cpumask_var(iter->started); 2578 kfree(iter->trace); 2579 kfree(iter->buffer_iter); 2580 seq_release_private(inode, file); 2581 return 0; 2582 } 2583 2584 static int tracing_open(struct inode *inode, struct file *file) 2585 { 2586 struct trace_iterator *iter; 2587 int ret = 0; 2588 2589 /* If this file was open for write, then erase contents */ 2590 if ((file->f_mode & FMODE_WRITE) && 2591 (file->f_flags & O_TRUNC)) { 2592 long cpu = (long) inode->i_private; 2593 2594 if (cpu == TRACE_PIPE_ALL_CPU) 2595 tracing_reset_online_cpus(&global_trace); 2596 else 2597 tracing_reset(&global_trace, cpu); 2598 } 2599 2600 if (file->f_mode & FMODE_READ) { 2601 iter = __tracing_open(inode, file, false); 2602 if (IS_ERR(iter)) 2603 ret = PTR_ERR(iter); 2604 else if (trace_flags & TRACE_ITER_LATENCY_FMT) 2605 iter->iter_flags |= TRACE_FILE_LAT_FMT; 2606 } 2607 return ret; 2608 } 2609 2610 static void * 2611 t_next(struct seq_file *m, void *v, loff_t *pos) 2612 { 2613 struct tracer *t = v; 2614 2615 (*pos)++; 2616 2617 if (t) 2618 t = t->next; 2619 2620 return t; 2621 } 2622 2623 static void *t_start(struct seq_file *m, loff_t *pos) 2624 { 2625 struct tracer *t; 2626 loff_t l = 0; 2627 2628 mutex_lock(&trace_types_lock); 2629 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) 2630 ; 2631 2632 return t; 2633 } 2634 2635 static void t_stop(struct seq_file *m, void *p) 2636 { 2637 mutex_unlock(&trace_types_lock); 2638 } 2639 2640 static int t_show(struct seq_file *m, void *v) 2641 { 2642 struct tracer *t = v; 2643 2644 if (!t) 2645 
return 0; 2646 2647 seq_printf(m, "%s", t->name); 2648 if (t->next) 2649 seq_putc(m, ' '); 2650 else 2651 seq_putc(m, '\n'); 2652 2653 return 0; 2654 } 2655 2656 static const struct seq_operations show_traces_seq_ops = { 2657 .start = t_start, 2658 .next = t_next, 2659 .stop = t_stop, 2660 .show = t_show, 2661 }; 2662 2663 static int show_traces_open(struct inode *inode, struct file *file) 2664 { 2665 if (tracing_disabled) 2666 return -ENODEV; 2667 2668 return seq_open(file, &show_traces_seq_ops); 2669 } 2670 2671 static ssize_t 2672 tracing_write_stub(struct file *filp, const char __user *ubuf, 2673 size_t count, loff_t *ppos) 2674 { 2675 return count; 2676 } 2677 2678 static loff_t tracing_seek(struct file *file, loff_t offset, int origin) 2679 { 2680 if (file->f_mode & FMODE_READ) 2681 return seq_lseek(file, offset, origin); 2682 else 2683 return 0; 2684 } 2685 2686 static const struct file_operations tracing_fops = { 2687 .open = tracing_open, 2688 .read = seq_read, 2689 .write = tracing_write_stub, 2690 .llseek = tracing_seek, 2691 .release = tracing_release, 2692 }; 2693 2694 static const struct file_operations show_traces_fops = { 2695 .open = show_traces_open, 2696 .read = seq_read, 2697 .release = seq_release, 2698 .llseek = seq_lseek, 2699 }; 2700 2701 /* 2702 * Only trace on a CPU if the bitmask is set: 2703 */ 2704 static cpumask_var_t tracing_cpumask; 2705 2706 /* 2707 * The tracer itself will not take this lock, but still we want 2708 * to provide a consistent cpumask to user-space: 2709 */ 2710 static DEFINE_MUTEX(tracing_cpumask_update_lock); 2711 2712 /* 2713 * Temporary storage for the character representation of the 2714 * CPU bitmask (and one more byte for the newline): 2715 */ 2716 static char mask_str[NR_CPUS + 1]; 2717 2718 static ssize_t 2719 tracing_cpumask_read(struct file *filp, char __user *ubuf, 2720 size_t count, loff_t *ppos) 2721 { 2722 int len; 2723 2724 mutex_lock(&tracing_cpumask_update_lock); 2725 2726 len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 2727 if (count - len < 2) { 2728 count = -EINVAL; 2729 goto out_err; 2730 } 2731 len += sprintf(mask_str + len, "\n"); 2732 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 2733 2734 out_err: 2735 mutex_unlock(&tracing_cpumask_update_lock); 2736 2737 return count; 2738 } 2739 2740 static ssize_t 2741 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 2742 size_t count, loff_t *ppos) 2743 { 2744 int err, cpu; 2745 cpumask_var_t tracing_cpumask_new; 2746 2747 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 2748 return -ENOMEM; 2749 2750 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2751 if (err) 2752 goto err_unlock; 2753 2754 mutex_lock(&tracing_cpumask_update_lock); 2755 2756 local_irq_disable(); 2757 arch_spin_lock(&ftrace_max_lock); 2758 for_each_tracing_cpu(cpu) { 2759 /* 2760 * Increase/decrease the disabled counter if we are 2761 * about to flip a bit in the cpumask: 2762 */ 2763 if (cpumask_test_cpu(cpu, tracing_cpumask) && 2764 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2765 atomic_inc(&global_trace.data[cpu]->disabled); 2766 ring_buffer_record_disable_cpu(global_trace.buffer, cpu); 2767 } 2768 if (!cpumask_test_cpu(cpu, tracing_cpumask) && 2769 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 2770 atomic_dec(&global_trace.data[cpu]->disabled); 2771 ring_buffer_record_enable_cpu(global_trace.buffer, cpu); 2772 } 2773 } 2774 arch_spin_unlock(&ftrace_max_lock); 2775 local_irq_enable(); 2776 2777 cpumask_copy(tracing_cpumask, 
tracing_cpumask_new); 2778 2779 mutex_unlock(&tracing_cpumask_update_lock); 2780 free_cpumask_var(tracing_cpumask_new); 2781 2782 return count; 2783 2784 err_unlock: 2785 free_cpumask_var(tracing_cpumask_new); 2786 2787 return err; 2788 } 2789 2790 static const struct file_operations tracing_cpumask_fops = { 2791 .open = tracing_open_generic, 2792 .read = tracing_cpumask_read, 2793 .write = tracing_cpumask_write, 2794 .llseek = generic_file_llseek, 2795 }; 2796 2797 static int tracing_trace_options_show(struct seq_file *m, void *v) 2798 { 2799 struct tracer_opt *trace_opts; 2800 u32 tracer_flags; 2801 int i; 2802 2803 mutex_lock(&trace_types_lock); 2804 tracer_flags = current_trace->flags->val; 2805 trace_opts = current_trace->flags->opts; 2806 2807 for (i = 0; trace_options[i]; i++) { 2808 if (trace_flags & (1 << i)) 2809 seq_printf(m, "%s\n", trace_options[i]); 2810 else 2811 seq_printf(m, "no%s\n", trace_options[i]); 2812 } 2813 2814 for (i = 0; trace_opts[i].name; i++) { 2815 if (tracer_flags & trace_opts[i].bit) 2816 seq_printf(m, "%s\n", trace_opts[i].name); 2817 else 2818 seq_printf(m, "no%s\n", trace_opts[i].name); 2819 } 2820 mutex_unlock(&trace_types_lock); 2821 2822 return 0; 2823 } 2824 2825 static int __set_tracer_option(struct tracer *trace, 2826 struct tracer_flags *tracer_flags, 2827 struct tracer_opt *opts, int neg) 2828 { 2829 int ret; 2830 2831 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); 2832 if (ret) 2833 return ret; 2834 2835 if (neg) 2836 tracer_flags->val &= ~opts->bit; 2837 else 2838 tracer_flags->val |= opts->bit; 2839 return 0; 2840 } 2841 2842 /* Try to assign a tracer specific option */ 2843 static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 2844 { 2845 struct tracer_flags *tracer_flags = trace->flags; 2846 struct tracer_opt *opts = NULL; 2847 int i; 2848 2849 for (i = 0; tracer_flags->opts[i].name; i++) { 2850 opts = &tracer_flags->opts[i]; 2851 2852 if (strcmp(cmp, opts->name) == 0) 2853 return __set_tracer_option(trace, trace->flags, 2854 opts, neg); 2855 } 2856 2857 return -EINVAL; 2858 } 2859 2860 static void set_tracer_flags(unsigned int mask, int enabled) 2861 { 2862 /* do nothing if flag is already set */ 2863 if (!!(trace_flags & mask) == !!enabled) 2864 return; 2865 2866 if (enabled) 2867 trace_flags |= mask; 2868 else 2869 trace_flags &= ~mask; 2870 2871 if (mask == TRACE_ITER_RECORD_CMD) 2872 trace_event_enable_cmd_record(enabled); 2873 2874 if (mask == TRACE_ITER_OVERWRITE) 2875 ring_buffer_change_overwrite(global_trace.buffer, enabled); 2876 2877 if (mask == TRACE_ITER_PRINTK) 2878 trace_printk_start_stop_comm(enabled); 2879 } 2880 2881 static int trace_set_options(char *option) 2882 { 2883 char *cmp; 2884 int neg = 0; 2885 int ret = 0; 2886 int i; 2887 2888 cmp = strstrip(option); 2889 2890 if (strncmp(cmp, "no", 2) == 0) { 2891 neg = 1; 2892 cmp += 2; 2893 } 2894 2895 for (i = 0; trace_options[i]; i++) { 2896 if (strcmp(cmp, trace_options[i]) == 0) { 2897 set_tracer_flags(1 << i, !neg); 2898 break; 2899 } 2900 } 2901 2902 /* If no option could be set, test the specific tracer options */ 2903 if (!trace_options[i]) { 2904 mutex_lock(&trace_types_lock); 2905 ret = set_tracer_option(current_trace, cmp, neg); 2906 mutex_unlock(&trace_types_lock); 2907 } 2908 2909 return ret; 2910 } 2911 2912 static ssize_t 2913 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 2914 size_t cnt, loff_t *ppos) 2915 { 2916 char buf[64]; 2917 2918 if (cnt >= sizeof(buf)) 2919 return -EINVAL; 2920 2921 if 
(copy_from_user(&buf, ubuf, cnt)) 2922 return -EFAULT; 2923 2924 buf[cnt] = 0; 2925 2926 trace_set_options(buf); 2927 2928 *ppos += cnt; 2929 2930 return cnt; 2931 } 2932 2933 static int tracing_trace_options_open(struct inode *inode, struct file *file) 2934 { 2935 if (tracing_disabled) 2936 return -ENODEV; 2937 return single_open(file, tracing_trace_options_show, NULL); 2938 } 2939 2940 static const struct file_operations tracing_iter_fops = { 2941 .open = tracing_trace_options_open, 2942 .read = seq_read, 2943 .llseek = seq_lseek, 2944 .release = single_release, 2945 .write = tracing_trace_options_write, 2946 }; 2947 2948 static const char readme_msg[] = 2949 "tracing mini-HOWTO:\n\n" 2950 "# mount -t debugfs nodev /sys/kernel/debug\n\n" 2951 "# cat /sys/kernel/debug/tracing/available_tracers\n" 2952 "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" 2953 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2954 "nop\n" 2955 "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" 2956 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2957 "wakeup\n" 2958 "# cat /sys/kernel/debug/tracing/trace_options\n" 2959 "noprint-parent nosym-offset nosym-addr noverbose\n" 2960 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" 2961 "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n" 2962 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" 2963 "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n" 2964 ; 2965 2966 static ssize_t 2967 tracing_readme_read(struct file *filp, char __user *ubuf, 2968 size_t cnt, loff_t *ppos) 2969 { 2970 return simple_read_from_buffer(ubuf, cnt, ppos, 2971 readme_msg, strlen(readme_msg)); 2972 } 2973 2974 static const struct file_operations tracing_readme_fops = { 2975 .open = tracing_open_generic, 2976 .read = tracing_readme_read, 2977 .llseek = generic_file_llseek, 2978 }; 2979 2980 static ssize_t 2981 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, 2982 size_t cnt, loff_t *ppos) 2983 { 2984 char *buf_comm; 2985 char *file_buf; 2986 char *buf; 2987 int len = 0; 2988 int pid; 2989 int i; 2990 2991 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); 2992 if (!file_buf) 2993 return -ENOMEM; 2994 2995 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); 2996 if (!buf_comm) { 2997 kfree(file_buf); 2998 return -ENOMEM; 2999 } 3000 3001 buf = file_buf; 3002 3003 for (i = 0; i < SAVED_CMDLINES; i++) { 3004 int r; 3005 3006 pid = map_cmdline_to_pid[i]; 3007 if (pid == -1 || pid == NO_CMDLINE_MAP) 3008 continue; 3009 3010 trace_find_cmdline(pid, buf_comm); 3011 r = sprintf(buf, "%d %s\n", pid, buf_comm); 3012 buf += r; 3013 len += r; 3014 } 3015 3016 len = simple_read_from_buffer(ubuf, cnt, ppos, 3017 file_buf, len); 3018 3019 kfree(file_buf); 3020 kfree(buf_comm); 3021 3022 return len; 3023 } 3024 3025 static const struct file_operations tracing_saved_cmdlines_fops = { 3026 .open = tracing_open_generic, 3027 .read = tracing_saved_cmdlines_read, 3028 .llseek = generic_file_llseek, 3029 }; 3030 3031 static ssize_t 3032 tracing_set_trace_read(struct file *filp, char __user *ubuf, 3033 size_t cnt, loff_t *ppos) 3034 { 3035 char buf[MAX_TRACER_SIZE+2]; 3036 int r; 3037 3038 mutex_lock(&trace_types_lock); 3039 r = sprintf(buf, "%s\n", current_trace->name); 3040 mutex_unlock(&trace_types_lock); 3041 3042 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3043 } 3044 3045 int tracer_init(struct tracer *t, struct trace_array *tr) 3046 { 3047 tracing_reset_online_cpus(tr); 3048 return t->init(tr); 3049 } 3050 3051 
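/*
 * Illustrative sketch, not part of the original file: tracer_init()
 * above is what hands a tracer freshly reset buffers before its init
 * callback runs. A minimal (hypothetical) tracer plugging into this
 * path could look like the following; the name "example" and the
 * trivial callback are made up for illustration.
 *
 *	static int example_tracer_init(struct trace_array *tr)
 *	{
 *		// buffers were already reset by tracer_init()
 *		return 0;
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *	};
 *
 *	static int __init init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 *
 * Once registered, "example" appears in available_tracers and can be
 * selected via tracing_set_tracer() below.
 */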
static void set_buffer_entries(struct trace_array *tr, unsigned long val) 3052 { 3053 int cpu; 3054 for_each_tracing_cpu(cpu) 3055 tr->data[cpu]->entries = val; 3056 } 3057 3058 /* resize @tr's buffer to the size of @size_tr's entries */ 3059 static int resize_buffer_duplicate_size(struct trace_array *tr, 3060 struct trace_array *size_tr, int cpu_id) 3061 { 3062 int cpu, ret = 0; 3063 3064 if (cpu_id == RING_BUFFER_ALL_CPUS) { 3065 for_each_tracing_cpu(cpu) { 3066 ret = ring_buffer_resize(tr->buffer, 3067 size_tr->data[cpu]->entries, cpu); 3068 if (ret < 0) 3069 break; 3070 tr->data[cpu]->entries = size_tr->data[cpu]->entries; 3071 } 3072 } else { 3073 ret = ring_buffer_resize(tr->buffer, 3074 size_tr->data[cpu_id]->entries, cpu_id); 3075 if (ret == 0) 3076 tr->data[cpu_id]->entries = 3077 size_tr->data[cpu_id]->entries; 3078 } 3079 3080 return ret; 3081 } 3082 3083 static int __tracing_resize_ring_buffer(unsigned long size, int cpu) 3084 { 3085 int ret; 3086 3087 /* 3088 * If the kernel or the user changes the size of the ring buffer, 3089 * we use the size that was given, and we can forget about 3090 * expanding it later. 3091 */ 3092 ring_buffer_expanded = 1; 3093 3094 /* May be called before buffers are initialized */ 3095 if (!global_trace.buffer) 3096 return 0; 3097 3098 ret = ring_buffer_resize(global_trace.buffer, size, cpu); 3099 if (ret < 0) 3100 return ret; 3101 3102 if (!current_trace->use_max_tr) 3103 goto out; 3104 3105 ret = ring_buffer_resize(max_tr.buffer, size, cpu); 3106 if (ret < 0) { 3107 int r = resize_buffer_duplicate_size(&global_trace, 3108 &global_trace, cpu); 3109 if (r < 0) { 3110 /* 3111 * AARGH! We are left with a max buffer of a 3112 * different size!!!! 3113 * The max buffer is our "snapshot" buffer. 3114 * When a tracer needs a snapshot (one of the 3115 * latency tracers), it swaps the max buffer 3116 * with the saved snapshot. We succeeded in 3117 * updating the size of the main buffer, but failed to 3118 * update the size of the max buffer. And when we tried 3119 * to reset the main buffer to the original size, we 3120 * failed there too. This is very unlikely to 3121 * happen, but if it does, warn and kill all 3122 * tracing. 3123 */ 3124 WARN_ON(1); 3125 tracing_disabled = 1; 3126 } 3127 return ret; 3128 } 3129 3130 if (cpu == RING_BUFFER_ALL_CPUS) 3131 set_buffer_entries(&max_tr, size); 3132 else 3133 max_tr.data[cpu]->entries = size; 3134 3135 out: 3136 if (cpu == RING_BUFFER_ALL_CPUS) 3137 set_buffer_entries(&global_trace, size); 3138 else 3139 global_trace.data[cpu]->entries = size; 3140 3141 return ret; 3142 } 3143 3144 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id) 3145 { 3146 int ret = size; 3147 3148 mutex_lock(&trace_types_lock); 3149 3150 if (cpu_id != RING_BUFFER_ALL_CPUS) { 3151 /* make sure this cpu is enabled in the mask */ 3152 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 3153 ret = -EINVAL; 3154 goto out; 3155 } 3156 } 3157 3158 ret = __tracing_resize_ring_buffer(size, cpu_id); 3159 if (ret < 0) 3160 ret = -ENOMEM; 3161 3162 out: 3163 mutex_unlock(&trace_types_lock); 3164 3165 return ret; 3166 } 3167 3168 3169 /** 3170 * tracing_update_buffers - used by tracing facility to expand ring buffers 3171 * 3172 * To save memory when tracing is never used on a system that has it 3173 * configured in, the ring buffers are set to a minimum size. But once 3174 * a user starts to use the tracing facility, they need to grow 3175 * to their default size.
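 * (One example of a caller: the event-enable path invokes
 * tracing_update_buffers() before the first event is recorded, so the
 * first user of tracing pays the one-time cost of the expansion.)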
3176 * 3177 * This function is to be called when a tracer is about to be used. 3178 */ 3179 int tracing_update_buffers(void) 3180 { 3181 int ret = 0; 3182 3183 mutex_lock(&trace_types_lock); 3184 if (!ring_buffer_expanded) 3185 ret = __tracing_resize_ring_buffer(trace_buf_size, 3186 RING_BUFFER_ALL_CPUS); 3187 mutex_unlock(&trace_types_lock); 3188 3189 return ret; 3190 } 3191 3192 struct trace_option_dentry; 3193 3194 static struct trace_option_dentry * 3195 create_trace_option_files(struct tracer *tracer); 3196 3197 static void 3198 destroy_trace_option_files(struct trace_option_dentry *topts); 3199 3200 static int tracing_set_tracer(const char *buf) 3201 { 3202 static struct trace_option_dentry *topts; 3203 struct trace_array *tr = &global_trace; 3204 struct tracer *t; 3205 bool had_max_tr; 3206 int ret = 0; 3207 3208 mutex_lock(&trace_types_lock); 3209 3210 if (!ring_buffer_expanded) { 3211 ret = __tracing_resize_ring_buffer(trace_buf_size, 3212 RING_BUFFER_ALL_CPUS); 3213 if (ret < 0) 3214 goto out; 3215 ret = 0; 3216 } 3217 3218 for (t = trace_types; t; t = t->next) { 3219 if (strcmp(t->name, buf) == 0) 3220 break; 3221 } 3222 if (!t) { 3223 ret = -EINVAL; 3224 goto out; 3225 } 3226 if (t == current_trace) 3227 goto out; 3228 3229 trace_branch_disable(); 3230 if (current_trace->reset) 3231 current_trace->reset(tr); 3232 3233 had_max_tr = current_trace->allocated_snapshot; 3234 current_trace = &nop_trace; 3235 3236 if (had_max_tr && !t->use_max_tr) { 3237 /* 3238 * We need to make sure that the update_max_tr sees that 3239 * current_trace changed to nop_trace to keep it from 3240 * swapping the buffers after we resize it. 3241 * update_max_tr() is called with interrupts disabled, 3242 * so a synchronize_sched() is sufficient. 3243 */ 3244 synchronize_sched(); 3245 /* 3246 * We don't free the ring buffer; instead, we resize it, because 3247 * the max_tr ring buffer has some state (e.g. ring->clock) that 3248 * we want to preserve. 3249 */ 3250 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); 3251 set_buffer_entries(&max_tr, 1); 3252 tracing_reset_online_cpus(&max_tr); 3253 current_trace->allocated_snapshot = false; 3254 } 3255 destroy_trace_option_files(topts); 3256 3257 topts = create_trace_option_files(t); 3258 if (t->use_max_tr && !had_max_tr) { 3259 /* we need to make the per-CPU buffer sizes equivalent */ 3260 ret = resize_buffer_duplicate_size(&max_tr, &global_trace, 3261 RING_BUFFER_ALL_CPUS); 3262 if (ret < 0) 3263 goto out; 3264 t->allocated_snapshot = true; 3265 } 3266 3267 if (t->init) { 3268 ret = tracer_init(t, tr); 3269 if (ret) 3270 goto out; 3271 } 3272 3273 current_trace = t; 3274 trace_branch_enable(tr); 3275 out: 3276 mutex_unlock(&trace_types_lock); 3277 3278 return ret; 3279 } 3280 3281 static ssize_t 3282 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 3283 size_t cnt, loff_t *ppos) 3284 { 3285 char buf[MAX_TRACER_SIZE+1]; 3286 int i; 3287 size_t ret; 3288 int err; 3289 3290 ret = cnt; 3291 3292 if (cnt > MAX_TRACER_SIZE) 3293 cnt = MAX_TRACER_SIZE; 3294 3295 if (copy_from_user(&buf, ubuf, cnt)) 3296 return -EFAULT; 3297 3298 buf[cnt] = 0; 3299 3300 /* strip trailing whitespace.
*/ 3301 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) 3302 buf[i] = 0; 3303 3304 err = tracing_set_tracer(buf); 3305 if (err) 3306 return err; 3307 3308 *ppos += ret; 3309 3310 return ret; 3311 } 3312 3313 static ssize_t 3314 tracing_max_lat_read(struct file *filp, char __user *ubuf, 3315 size_t cnt, loff_t *ppos) 3316 { 3317 unsigned long *ptr = filp->private_data; 3318 char buf[64]; 3319 int r; 3320 3321 r = snprintf(buf, sizeof(buf), "%ld\n", 3322 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 3323 if (r > sizeof(buf)) 3324 r = sizeof(buf); 3325 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3326 } 3327 3328 static ssize_t 3329 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 3330 size_t cnt, loff_t *ppos) 3331 { 3332 unsigned long *ptr = filp->private_data; 3333 unsigned long val; 3334 int ret; 3335 3336 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3337 if (ret) 3338 return ret; 3339 3340 *ptr = val * 1000; 3341 3342 return cnt; 3343 } 3344 3345 static int tracing_open_pipe(struct inode *inode, struct file *filp) 3346 { 3347 long cpu_file = (long) inode->i_private; 3348 struct trace_iterator *iter; 3349 int ret = 0; 3350 3351 if (tracing_disabled) 3352 return -ENODEV; 3353 3354 mutex_lock(&trace_types_lock); 3355 3356 /* create a buffer to store the information to pass to userspace */ 3357 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3358 if (!iter) { 3359 ret = -ENOMEM; 3360 goto out; 3361 } 3362 3363 /* 3364 * We make a copy of the current tracer to avoid concurrent 3365 * changes on it while we are reading. 3366 */ 3367 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); 3368 if (!iter->trace) { 3369 ret = -ENOMEM; 3370 goto fail; 3371 } 3372 *iter->trace = *current_trace; 3373 3374 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3375 ret = -ENOMEM; 3376 goto fail; 3377 } 3378 3379 /* trace pipe does not show start of buffer */ 3380 cpumask_setall(iter->started); 3381 3382 if (trace_flags & TRACE_ITER_LATENCY_FMT) 3383 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3384 3385 /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ 3386 if (trace_clocks[trace_clock_id].in_ns) 3387 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 3388 3389 iter->cpu_file = cpu_file; 3390 iter->tr = &global_trace; 3391 mutex_init(&iter->mutex); 3392 filp->private_data = iter; 3393 3394 if (iter->trace->pipe_open) 3395 iter->trace->pipe_open(iter); 3396 3397 nonseekable_open(inode, filp); 3398 out: 3399 mutex_unlock(&trace_types_lock); 3400 return ret; 3401 3402 fail: 3403 kfree(iter->trace); 3404 kfree(iter); 3405 mutex_unlock(&trace_types_lock); 3406 return ret; 3407 } 3408 3409 static int tracing_release_pipe(struct inode *inode, struct file *file) 3410 { 3411 struct trace_iterator *iter = file->private_data; 3412 3413 mutex_lock(&trace_types_lock); 3414 3415 if (iter->trace->pipe_close) 3416 iter->trace->pipe_close(iter); 3417 3418 mutex_unlock(&trace_types_lock); 3419 3420 free_cpumask_var(iter->started); 3421 mutex_destroy(&iter->mutex); 3422 kfree(iter->trace); 3423 kfree(iter); 3424 3425 return 0; 3426 } 3427 3428 static unsigned int 3429 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 3430 { 3431 struct trace_iterator *iter = filp->private_data; 3432 3433 if (trace_flags & TRACE_ITER_BLOCK) { 3434 /* 3435 * Always select as readable when in blocking mode 3436 */ 3437 return POLLIN | POLLRDNORM; 3438 } else { 3439 if (!trace_empty(iter)) 3440 return POLLIN | POLLRDNORM; 3441 poll_wait(filp, &trace_wait, poll_table); 3442 if (!trace_empty(iter)) 3443 return POLLIN | POLLRDNORM; 3444 3445 return 0; 3446 } 3447 } 3448 3449 /* 3450 * This is a make-shift waitqueue. 3451 * A tracer might use this callback in some rare cases: 3452 * 3453 * 1) the current tracer might hold the runqueue lock when it wakes up 3454 * a reader, hence a deadlock (sched, function, and function graph tracers) 3455 * 2) the function tracers trace all functions, and we don't want 3456 * the overhead of calling wake_up and friends 3457 * (and of tracing them too) 3458 * 3459 * Anyway, this is really a very primitive wakeup. 3460 */ 3461 void poll_wait_pipe(struct trace_iterator *iter) 3462 { 3463 set_current_state(TASK_INTERRUPTIBLE); 3464 /* sleep for 100 msecs, and try again. */ 3465 schedule_timeout(HZ / 10); 3466 } 3467 3468 /* Must be called with trace_types_lock mutex held. */ 3469 static int tracing_wait_pipe(struct file *filp) 3470 { 3471 struct trace_iterator *iter = filp->private_data; 3472 3473 while (trace_empty(iter)) { 3474 3475 if ((filp->f_flags & O_NONBLOCK)) { 3476 return -EAGAIN; 3477 } 3478 3479 mutex_unlock(&iter->mutex); 3480 3481 iter->trace->wait_pipe(iter); 3482 3483 mutex_lock(&iter->mutex); 3484 3485 if (signal_pending(current)) 3486 return -EINTR; 3487 3488 /* 3489 * We block until we read something, or until tracing is 3490 * disabled after we have already read something. This 3491 * allows a user to cat this file, and then enable tracing. 3492 * But after we have read something, we give an EOF when 3493 * tracing is disabled again. 3494 * 3495 * iter->pos will be 0 if we haven't read anything. 3496 */ 3497 if (!tracing_is_enabled() && iter->pos) 3498 break; 3499 } 3500 3501 return 1; 3502 } 3503 3504 /* 3505 * Consumer reader.
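 * Unlike the "trace" file, a read here consumes the events it returns:
 * trace_consume() removes each printed entry from the ring buffer, so
 * a later reader will not see it again. A typical use is simply:
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe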
3506 */ 3507 static ssize_t 3508 tracing_read_pipe(struct file *filp, char __user *ubuf, 3509 size_t cnt, loff_t *ppos) 3510 { 3511 struct trace_iterator *iter = filp->private_data; 3512 ssize_t sret; 3513 3514 /* return any leftover data */ 3515 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3516 if (sret != -EBUSY) 3517 return sret; 3518 3519 trace_seq_init(&iter->seq); 3520 3521 /* copy the tracer to avoid using a global lock all around */ 3522 mutex_lock(&trace_types_lock); 3523 if (unlikely(iter->trace->name != current_trace->name)) 3524 *iter->trace = *current_trace; 3525 mutex_unlock(&trace_types_lock); 3526 3527 /* 3528 * Avoid more than one consumer on a single file descriptor 3529 * This is just a matter of traces coherency, the ring buffer itself 3530 * is protected. 3531 */ 3532 mutex_lock(&iter->mutex); 3533 if (iter->trace->read) { 3534 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 3535 if (sret) 3536 goto out; 3537 } 3538 3539 waitagain: 3540 sret = tracing_wait_pipe(filp); 3541 if (sret <= 0) 3542 goto out; 3543 3544 /* stop when tracing is finished */ 3545 if (trace_empty(iter)) { 3546 sret = 0; 3547 goto out; 3548 } 3549 3550 if (cnt >= PAGE_SIZE) 3551 cnt = PAGE_SIZE - 1; 3552 3553 /* reset all but tr, trace, and overruns */ 3554 memset(&iter->seq, 0, 3555 sizeof(struct trace_iterator) - 3556 offsetof(struct trace_iterator, seq)); 3557 iter->pos = -1; 3558 3559 trace_event_read_lock(); 3560 trace_access_lock(iter->cpu_file); 3561 while (trace_find_next_entry_inc(iter) != NULL) { 3562 enum print_line_t ret; 3563 int len = iter->seq.len; 3564 3565 ret = print_trace_line(iter); 3566 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3567 /* don't print partial lines */ 3568 iter->seq.len = len; 3569 break; 3570 } 3571 if (ret != TRACE_TYPE_NO_CONSUME) 3572 trace_consume(iter); 3573 3574 if (iter->seq.len >= cnt) 3575 break; 3576 3577 /* 3578 * Setting the full flag means we reached the trace_seq buffer 3579 * size and we should leave by partial output condition above. 3580 * One of the trace_seq_* functions is not used properly. 3581 */ 3582 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 3583 iter->ent->type); 3584 } 3585 trace_access_unlock(iter->cpu_file); 3586 trace_event_read_unlock(); 3587 3588 /* Now copy what we have to the user */ 3589 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 3590 if (iter->seq.readpos >= iter->seq.len) 3591 trace_seq_init(&iter->seq); 3592 3593 /* 3594 * If there was nothing to send to user, in spite of consuming trace 3595 * entries, go back to wait for more entries. 3596 */ 3597 if (sret == -EBUSY) 3598 goto waitagain; 3599 3600 out: 3601 mutex_unlock(&iter->mutex); 3602 3603 return sret; 3604 } 3605 3606 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, 3607 struct pipe_buffer *buf) 3608 { 3609 __free_page(buf->page); 3610 } 3611 3612 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 3613 unsigned int idx) 3614 { 3615 __free_page(spd->pages[idx]); 3616 } 3617 3618 static const struct pipe_buf_operations tracing_pipe_buf_ops = { 3619 .can_merge = 0, 3620 .map = generic_pipe_buf_map, 3621 .unmap = generic_pipe_buf_unmap, 3622 .confirm = generic_pipe_buf_confirm, 3623 .release = tracing_pipe_buf_release, 3624 .steal = generic_pipe_buf_steal, 3625 .get = generic_pipe_buf_get, 3626 }; 3627 3628 static size_t 3629 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 3630 { 3631 size_t count; 3632 int ret; 3633 3634 /* Seq buffer is page-sized, exactly what we need. 
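 * The loop below appends whole lines until either the page budget (rem)
 * or the buffer is exhausted; a line that does not fit is trimmed back
 * off the seq buffer (iter->seq.len -= count) so only complete lines
 * are handed to the pipe.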
*/ 3635 for (;;) { 3636 count = iter->seq.len; 3637 ret = print_trace_line(iter); 3638 count = iter->seq.len - count; 3639 if (rem < count) { 3640 rem = 0; 3641 iter->seq.len -= count; 3642 break; 3643 } 3644 if (ret == TRACE_TYPE_PARTIAL_LINE) { 3645 iter->seq.len -= count; 3646 break; 3647 } 3648 3649 if (ret != TRACE_TYPE_NO_CONSUME) 3650 trace_consume(iter); 3651 rem -= count; 3652 if (!trace_find_next_entry_inc(iter)) { 3653 rem = 0; 3654 iter->ent = NULL; 3655 break; 3656 } 3657 } 3658 3659 return rem; 3660 } 3661 3662 static ssize_t tracing_splice_read_pipe(struct file *filp, 3663 loff_t *ppos, 3664 struct pipe_inode_info *pipe, 3665 size_t len, 3666 unsigned int flags) 3667 { 3668 struct page *pages_def[PIPE_DEF_BUFFERS]; 3669 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 3670 struct trace_iterator *iter = filp->private_data; 3671 struct splice_pipe_desc spd = { 3672 .pages = pages_def, 3673 .partial = partial_def, 3674 .nr_pages = 0, /* This gets updated below. */ 3675 .nr_pages_max = PIPE_DEF_BUFFERS, 3676 .flags = flags, 3677 .ops = &tracing_pipe_buf_ops, 3678 .spd_release = tracing_spd_release_pipe, 3679 }; 3680 ssize_t ret; 3681 size_t rem; 3682 unsigned int i; 3683 3684 if (splice_grow_spd(pipe, &spd)) 3685 return -ENOMEM; 3686 3687 /* copy the tracer to avoid using a global lock all around */ 3688 mutex_lock(&trace_types_lock); 3689 if (unlikely(iter->trace->name != current_trace->name)) 3690 *iter->trace = *current_trace; 3691 mutex_unlock(&trace_types_lock); 3692 3693 mutex_lock(&iter->mutex); 3694 3695 if (iter->trace->splice_read) { 3696 ret = iter->trace->splice_read(iter, filp, 3697 ppos, pipe, len, flags); 3698 if (ret) 3699 goto out_err; 3700 } 3701 3702 ret = tracing_wait_pipe(filp); 3703 if (ret <= 0) 3704 goto out_err; 3705 3706 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 3707 ret = -EFAULT; 3708 goto out_err; 3709 } 3710 3711 trace_event_read_lock(); 3712 trace_access_lock(iter->cpu_file); 3713 3714 /* Fill as many pages as possible. */ 3715 for (i = 0, rem = len; i < pipe->buffers && rem; i++) { 3716 spd.pages[i] = alloc_page(GFP_KERNEL); 3717 if (!spd.pages[i]) 3718 break; 3719 3720 rem = tracing_fill_pipe_page(rem, iter); 3721 3722 /* Copy the data into the page, so we can start over. 
*/ 3723 ret = trace_seq_to_buffer(&iter->seq, 3724 page_address(spd.pages[i]), 3725 iter->seq.len); 3726 if (ret < 0) { 3727 __free_page(spd.pages[i]); 3728 break; 3729 } 3730 spd.partial[i].offset = 0; 3731 spd.partial[i].len = iter->seq.len; 3732 3733 trace_seq_init(&iter->seq); 3734 } 3735 3736 trace_access_unlock(iter->cpu_file); 3737 trace_event_read_unlock(); 3738 mutex_unlock(&iter->mutex); 3739 3740 spd.nr_pages = i; 3741 3742 ret = splice_to_pipe(pipe, &spd); 3743 out: 3744 splice_shrink_spd(&spd); 3745 return ret; 3746 3747 out_err: 3748 mutex_unlock(&iter->mutex); 3749 goto out; 3750 } 3751 3752 struct ftrace_entries_info { 3753 struct trace_array *tr; 3754 int cpu; 3755 }; 3756 3757 static int tracing_entries_open(struct inode *inode, struct file *filp) 3758 { 3759 struct ftrace_entries_info *info; 3760 3761 if (tracing_disabled) 3762 return -ENODEV; 3763 3764 info = kzalloc(sizeof(*info), GFP_KERNEL); 3765 if (!info) 3766 return -ENOMEM; 3767 3768 info->tr = &global_trace; 3769 info->cpu = (unsigned long)inode->i_private; 3770 3771 filp->private_data = info; 3772 3773 return 0; 3774 } 3775 3776 static ssize_t 3777 tracing_entries_read(struct file *filp, char __user *ubuf, 3778 size_t cnt, loff_t *ppos) 3779 { 3780 struct ftrace_entries_info *info = filp->private_data; 3781 struct trace_array *tr = info->tr; 3782 char buf[64]; 3783 int r = 0; 3784 ssize_t ret; 3785 3786 mutex_lock(&trace_types_lock); 3787 3788 if (info->cpu == RING_BUFFER_ALL_CPUS) { 3789 int cpu, buf_size_same; 3790 unsigned long size; 3791 3792 size = 0; 3793 buf_size_same = 1; 3794 /* check if all cpu sizes are same */ 3795 for_each_tracing_cpu(cpu) { 3796 /* fill in the size from first enabled cpu */ 3797 if (size == 0) 3798 size = tr->data[cpu]->entries; 3799 if (size != tr->data[cpu]->entries) { 3800 buf_size_same = 0; 3801 break; 3802 } 3803 } 3804 3805 if (buf_size_same) { 3806 if (!ring_buffer_expanded) 3807 r = sprintf(buf, "%lu (expanded: %lu)\n", 3808 size >> 10, 3809 trace_buf_size >> 10); 3810 else 3811 r = sprintf(buf, "%lu\n", size >> 10); 3812 } else 3813 r = sprintf(buf, "X\n"); 3814 } else 3815 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10); 3816 3817 mutex_unlock(&trace_types_lock); 3818 3819 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3820 return ret; 3821 } 3822 3823 static ssize_t 3824 tracing_entries_write(struct file *filp, const char __user *ubuf, 3825 size_t cnt, loff_t *ppos) 3826 { 3827 struct ftrace_entries_info *info = filp->private_data; 3828 unsigned long val; 3829 int ret; 3830 3831 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 3832 if (ret) 3833 return ret; 3834 3835 /* must have at least 1 entry */ 3836 if (!val) 3837 return -EINVAL; 3838 3839 /* value is in KB */ 3840 val <<= 10; 3841 3842 ret = tracing_resize_ring_buffer(val, info->cpu); 3843 if (ret < 0) 3844 return ret; 3845 3846 *ppos += cnt; 3847 3848 return cnt; 3849 } 3850 3851 static int 3852 tracing_entries_release(struct inode *inode, struct file *filp) 3853 { 3854 struct ftrace_entries_info *info = filp->private_data; 3855 3856 kfree(info); 3857 3858 return 0; 3859 } 3860 3861 static ssize_t 3862 tracing_total_entries_read(struct file *filp, char __user *ubuf, 3863 size_t cnt, loff_t *ppos) 3864 { 3865 struct trace_array *tr = filp->private_data; 3866 char buf[64]; 3867 int r, cpu; 3868 unsigned long size = 0, expanded_size = 0; 3869 3870 mutex_lock(&trace_types_lock); 3871 for_each_tracing_cpu(cpu) { 3872 size += tr->data[cpu]->entries >> 10; 3873 if (!ring_buffer_expanded) 3874 
expanded_size += trace_buf_size >> 10; 3875 } 3876 if (ring_buffer_expanded) 3877 r = sprintf(buf, "%lu\n", size); 3878 else 3879 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 3880 mutex_unlock(&trace_types_lock); 3881 3882 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 3883 } 3884 3885 static ssize_t 3886 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 3887 size_t cnt, loff_t *ppos) 3888 { 3889 /* 3890 * There is no need to read what the user has written; this function 3891 * exists only so that "echo" into this file does not report an error. 3892 */ 3893 3894 *ppos += cnt; 3895 3896 return cnt; 3897 } 3898 3899 static int 3900 tracing_free_buffer_release(struct inode *inode, struct file *filp) 3901 { 3902 /* disable tracing? */ 3903 if (trace_flags & TRACE_ITER_STOP_ON_FREE) 3904 tracing_off(); 3905 /* resize the ring buffer to 0 */ 3906 tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS); 3907 3908 return 0; 3909 } 3910 3911 static ssize_t 3912 tracing_mark_write(struct file *filp, const char __user *ubuf, 3913 size_t cnt, loff_t *fpos) 3914 { 3915 unsigned long addr = (unsigned long)ubuf; 3916 struct ring_buffer_event *event; 3917 struct ring_buffer *buffer; 3918 struct print_entry *entry; 3919 unsigned long irq_flags; 3920 struct page *pages[2]; 3921 void *map_page[2]; 3922 int nr_pages = 1; 3923 ssize_t written; 3924 int offset; 3925 int size; 3926 int len; 3927 int ret; 3928 int i; 3929 3930 if (tracing_disabled) 3931 return -EINVAL; 3932 3933 if (!(trace_flags & TRACE_ITER_MARKERS)) 3934 return -EINVAL; 3935 3936 if (cnt > TRACE_BUF_SIZE) 3937 cnt = TRACE_BUF_SIZE; 3938 3939 /* 3940 * Userspace is injecting traces into the kernel trace buffer. 3941 * We want to be as non-intrusive as possible. 3942 * To do so, we do not want to allocate any special buffers 3943 * or take any locks, but instead write the userspace data 3944 * straight into the ring buffer. 3945 * 3946 * First we need to pin the userspace buffer into memory. It is 3947 * most likely already resident, because userspace just referenced it, 3948 * but there is no guarantee. By using get_user_pages_fast() 3949 * and kmap_atomic()/kunmap_atomic() we can get access to the 3950 * pages directly. We then write the data directly into the 3951 * ring buffer.
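 * For example (illustrative only), a task could annotate the trace
 * around a suspect region with:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hit the slow path", 17);
 *
 * and the string shows up inline with the kernel's own events.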
3952 */ 3953 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 3954 3955 /* check if we cross pages */ 3956 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) 3957 nr_pages = 2; 3958 3959 offset = addr & (PAGE_SIZE - 1); 3960 addr &= PAGE_MASK; 3961 3962 ret = get_user_pages_fast(addr, nr_pages, 0, pages); 3963 if (ret < nr_pages) { 3964 while (--ret >= 0) 3965 put_page(pages[ret]); 3966 written = -EFAULT; 3967 goto out; 3968 } 3969 3970 for (i = 0; i < nr_pages; i++) 3971 map_page[i] = kmap_atomic(pages[i]); 3972 3973 local_save_flags(irq_flags); 3974 size = sizeof(*entry) + cnt + 2; /* possible \n added */ 3975 buffer = global_trace.buffer; 3976 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3977 irq_flags, preempt_count()); 3978 if (!event) { 3979 /* Ring buffer disabled, return as if not open for write */ 3980 written = -EBADF; 3981 goto out_unlock; 3982 } 3983 3984 entry = ring_buffer_event_data(event); 3985 entry->ip = _THIS_IP_; 3986 3987 if (nr_pages == 2) { 3988 len = PAGE_SIZE - offset; 3989 memcpy(&entry->buf, map_page[0] + offset, len); 3990 memcpy(&entry->buf[len], map_page[1], cnt - len); 3991 } else 3992 memcpy(&entry->buf, map_page[0] + offset, cnt); 3993 3994 if (entry->buf[cnt - 1] != '\n') { 3995 entry->buf[cnt] = '\n'; 3996 entry->buf[cnt + 1] = '\0'; 3997 } else 3998 entry->buf[cnt] = '\0'; 3999 4000 __buffer_unlock_commit(buffer, event); 4001 4002 written = cnt; 4003 4004 *fpos += written; 4005 4006 out_unlock: 4007 for (i = 0; i < nr_pages; i++){ 4008 kunmap_atomic(map_page[i]); 4009 put_page(pages[i]); 4010 } 4011 out: 4012 return written; 4013 } 4014 4015 static int tracing_clock_show(struct seq_file *m, void *v) 4016 { 4017 int i; 4018 4019 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 4020 seq_printf(m, 4021 "%s%s%s%s", i ? " " : "", 4022 i == trace_clock_id ? "[" : "", trace_clocks[i].name, 4023 i == trace_clock_id ? "]" : ""); 4024 seq_putc(m, '\n'); 4025 4026 return 0; 4027 } 4028 4029 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 4030 size_t cnt, loff_t *fpos) 4031 { 4032 char buf[64]; 4033 const char *clockstr; 4034 int i; 4035 4036 if (cnt >= sizeof(buf)) 4037 return -EINVAL; 4038 4039 if (copy_from_user(&buf, ubuf, cnt)) 4040 return -EFAULT; 4041 4042 buf[cnt] = 0; 4043 4044 clockstr = strstrip(buf); 4045 4046 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 4047 if (strcmp(trace_clocks[i].name, clockstr) == 0) 4048 break; 4049 } 4050 if (i == ARRAY_SIZE(trace_clocks)) 4051 return -EINVAL; 4052 4053 trace_clock_id = i; 4054 4055 mutex_lock(&trace_types_lock); 4056 4057 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); 4058 if (max_tr.buffer) 4059 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); 4060 4061 /* 4062 * New clock may not be consistent with the previous clock. 4063 * Reset the buffer so that it doesn't have incomparable timestamps. 
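 * e.g. from the shell, where the names come from the trace_clocks[]
 * table (local, global and counter here):
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter
 *	# echo global > /sys/kernel/debug/tracing/trace_clock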
4064 */ 4065 tracing_reset_online_cpus(&global_trace); 4066 tracing_reset_online_cpus(&max_tr); 4067 4068 mutex_unlock(&trace_types_lock); 4069 4070 *fpos += cnt; 4071 4072 return cnt; 4073 } 4074 4075 static int tracing_clock_open(struct inode *inode, struct file *file) 4076 { 4077 if (tracing_disabled) 4078 return -ENODEV; 4079 return single_open(file, tracing_clock_show, NULL); 4080 } 4081 4082 #ifdef CONFIG_TRACER_SNAPSHOT 4083 static int tracing_snapshot_open(struct inode *inode, struct file *file) 4084 { 4085 struct trace_iterator *iter; 4086 int ret = 0; 4087 4088 if (file->f_mode & FMODE_READ) { 4089 iter = __tracing_open(inode, file, true); 4090 if (IS_ERR(iter)) 4091 ret = PTR_ERR(iter); 4092 } 4093 return ret; 4094 } 4095 4096 static ssize_t 4097 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4098 loff_t *ppos) 4099 { 4100 unsigned long val; 4101 int ret; 4102 4103 ret = tracing_update_buffers(); 4104 if (ret < 0) 4105 return ret; 4106 4107 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4108 if (ret) 4109 return ret; 4110 4111 mutex_lock(&trace_types_lock); 4112 4113 if (current_trace->use_max_tr) { 4114 ret = -EBUSY; 4115 goto out; 4116 } 4117 4118 switch (val) { 4119 case 0: 4120 if (current_trace->allocated_snapshot) { 4121 /* free spare buffer */ 4122 ring_buffer_resize(max_tr.buffer, 1, 4123 RING_BUFFER_ALL_CPUS); 4124 set_buffer_entries(&max_tr, 1); 4125 tracing_reset_online_cpus(&max_tr); 4126 current_trace->allocated_snapshot = false; 4127 } 4128 break; 4129 case 1: 4130 if (!current_trace->allocated_snapshot) { 4131 /* allocate spare buffer */ 4132 ret = resize_buffer_duplicate_size(&max_tr, 4133 &global_trace, RING_BUFFER_ALL_CPUS); 4134 if (ret < 0) 4135 break; 4136 current_trace->allocated_snapshot = true; 4137 } 4138 4139 local_irq_disable(); 4140 /* Now, we're going to swap */ 4141 update_max_tr(&global_trace, current, smp_processor_id()); 4142 local_irq_enable(); 4143 break; 4144 default: 4145 if (current_trace->allocated_snapshot) 4146 tracing_reset_online_cpus(&max_tr); 4147 else 4148 ret = -EINVAL; 4149 break; 4150 } 4151 4152 if (ret >= 0) { 4153 *ppos += cnt; 4154 ret = cnt; 4155 } 4156 out: 4157 mutex_unlock(&trace_types_lock); 4158 return ret; 4159 } 4160 #endif /* CONFIG_TRACER_SNAPSHOT */ 4161 4162 4163 static const struct file_operations tracing_max_lat_fops = { 4164 .open = tracing_open_generic, 4165 .read = tracing_max_lat_read, 4166 .write = tracing_max_lat_write, 4167 .llseek = generic_file_llseek, 4168 }; 4169 4170 static const struct file_operations set_tracer_fops = { 4171 .open = tracing_open_generic, 4172 .read = tracing_set_trace_read, 4173 .write = tracing_set_trace_write, 4174 .llseek = generic_file_llseek, 4175 }; 4176 4177 static const struct file_operations tracing_pipe_fops = { 4178 .open = tracing_open_pipe, 4179 .poll = tracing_poll_pipe, 4180 .read = tracing_read_pipe, 4181 .splice_read = tracing_splice_read_pipe, 4182 .release = tracing_release_pipe, 4183 .llseek = no_llseek, 4184 }; 4185 4186 static const struct file_operations tracing_entries_fops = { 4187 .open = tracing_entries_open, 4188 .read = tracing_entries_read, 4189 .write = tracing_entries_write, 4190 .release = tracing_entries_release, 4191 .llseek = generic_file_llseek, 4192 }; 4193 4194 static const struct file_operations tracing_total_entries_fops = { 4195 .open = tracing_open_generic, 4196 .read = tracing_total_entries_read, 4197 .llseek = generic_file_llseek, 4198 }; 4199 4200 static const struct file_operations 
tracing_free_buffer_fops = { 4201 .write = tracing_free_buffer_write, 4202 .release = tracing_free_buffer_release, 4203 }; 4204 4205 static const struct file_operations tracing_mark_fops = { 4206 .open = tracing_open_generic, 4207 .write = tracing_mark_write, 4208 .llseek = generic_file_llseek, 4209 }; 4210 4211 static const struct file_operations trace_clock_fops = { 4212 .open = tracing_clock_open, 4213 .read = seq_read, 4214 .llseek = seq_lseek, 4215 .release = single_release, 4216 .write = tracing_clock_write, 4217 }; 4218 4219 #ifdef CONFIG_TRACER_SNAPSHOT 4220 static const struct file_operations snapshot_fops = { 4221 .open = tracing_snapshot_open, 4222 .read = seq_read, 4223 .write = tracing_snapshot_write, 4224 .llseek = tracing_seek, 4225 .release = tracing_release, 4226 }; 4227 #endif /* CONFIG_TRACER_SNAPSHOT */ 4228 4229 struct ftrace_buffer_info { 4230 struct trace_array *tr; 4231 void *spare; 4232 int cpu; 4233 unsigned int read; 4234 }; 4235 4236 static int tracing_buffers_open(struct inode *inode, struct file *filp) 4237 { 4238 int cpu = (int)(long)inode->i_private; 4239 struct ftrace_buffer_info *info; 4240 4241 if (tracing_disabled) 4242 return -ENODEV; 4243 4244 info = kzalloc(sizeof(*info), GFP_KERNEL); 4245 if (!info) 4246 return -ENOMEM; 4247 4248 info->tr = &global_trace; 4249 info->cpu = cpu; 4250 info->spare = NULL; 4251 /* Force reading ring buffer for first read */ 4252 info->read = (unsigned int)-1; 4253 4254 filp->private_data = info; 4255 4256 return nonseekable_open(inode, filp); 4257 } 4258 4259 static ssize_t 4260 tracing_buffers_read(struct file *filp, char __user *ubuf, 4261 size_t count, loff_t *ppos) 4262 { 4263 struct ftrace_buffer_info *info = filp->private_data; 4264 ssize_t ret; 4265 size_t size; 4266 4267 if (!count) 4268 return 0; 4269 4270 if (!info->spare) 4271 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); 4272 if (!info->spare) 4273 return -ENOMEM; 4274 4275 /* Do we have previous read data to read? 
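 * (info->read is the offset into the spare page that has already been
 * copied to userspace; tracing_buffers_open() set it to (unsigned int)-1
 * so that the first read always falls through to ring_buffer_read_page()
 * below and refills the page.)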
*/ 4276 if (info->read < PAGE_SIZE) 4277 goto read; 4278 4279 trace_access_lock(info->cpu); 4280 ret = ring_buffer_read_page(info->tr->buffer, 4281 &info->spare, 4282 count, 4283 info->cpu, 0); 4284 trace_access_unlock(info->cpu); 4285 if (ret < 0) 4286 return 0; 4287 4288 info->read = 0; 4289 4290 read: 4291 size = PAGE_SIZE - info->read; 4292 if (size > count) 4293 size = count; 4294 4295 ret = copy_to_user(ubuf, info->spare + info->read, size); 4296 if (ret == size) 4297 return -EFAULT; 4298 size -= ret; 4299 4300 *ppos += size; 4301 info->read += size; 4302 4303 return size; 4304 } 4305 4306 static int tracing_buffers_release(struct inode *inode, struct file *file) 4307 { 4308 struct ftrace_buffer_info *info = file->private_data; 4309 4310 if (info->spare) 4311 ring_buffer_free_read_page(info->tr->buffer, info->spare); 4312 kfree(info); 4313 4314 return 0; 4315 } 4316 4317 struct buffer_ref { 4318 struct ring_buffer *buffer; 4319 void *page; 4320 int ref; 4321 }; 4322 4323 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 4324 struct pipe_buffer *buf) 4325 { 4326 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 4327 4328 if (--ref->ref) 4329 return; 4330 4331 ring_buffer_free_read_page(ref->buffer, ref->page); 4332 kfree(ref); 4333 buf->private = 0; 4334 } 4335 4336 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, 4337 struct pipe_buffer *buf) 4338 { 4339 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 4340 4341 ref->ref++; 4342 } 4343 4344 /* Pipe buffer operations for a buffer. */ 4345 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 4346 .can_merge = 0, 4347 .map = generic_pipe_buf_map, 4348 .unmap = generic_pipe_buf_unmap, 4349 .confirm = generic_pipe_buf_confirm, 4350 .release = buffer_pipe_buf_release, 4351 .steal = generic_pipe_buf_steal, 4352 .get = buffer_pipe_buf_get, 4353 }; 4354 4355 /* 4356 * Callback from splice_to_pipe(), if we need to release some pages 4357 * at the end of the spd in case we error'ed out in filling the pipe. 
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), used to release any pages left at
 * the end of the spd if we erred out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	size_t ret;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

	trace_access_lock(info->cpu);
	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = info->tr->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, info->cpu, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this page is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
	}

	trace_access_unlock(info->cpu);
	spd.nr_pages = i;
	/* did we read anything? */
	if (!spd.nr_pages) {
		if (flags & SPLICE_F_NONBLOCK)
			ret = -EAGAIN;
		else
			ret = 0;
		/* TODO: block */
		goto out;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
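/*
 * Illustrative userspace sketch (not part of this file): draining a
 * CPU's buffer without copying by splicing trace_pipe_raw into a
 * pipe and from there into a file, the path served by
 * tracing_buffers_splice_read() above. Offsets and lengths must be
 * page aligned; the paths and the 4096 byte page size are
 * assumptions of the example.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int raw = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int out = open("cpu0.raw", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(raw, NULL, pfd[1], NULL, 4096,
 *			   SPLICE_F_NONBLOCK)) > 0)
 *		splice(pfd[0], NULL, out, NULL, n, 0);
 */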
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	unsigned long cpu = (unsigned long)filp->private_data;
	struct trace_array *tr = &global_trace;
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "entries: %lu\n", cnt);

	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "overrun: %lu\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %lu\n", cnt);

	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "bytes: %lu\n", cnt);

	if (trace_clocks[trace_clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(tr->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(tr->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "dropped events: %lu\n", cnt);

	cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
	trace_seq_printf(s, "read events: %lu\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	if (!debugfs_initialized())
		return NULL;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}

static struct dentry *d_percpu;

static struct dentry *tracing_dentry_percpu(void)
{
	static int once;
	struct dentry *d_tracer;

	if (d_percpu)
		return d_percpu;

	d_tracer = tracing_init_dentry();

	if (!d_tracer)
		return NULL;

	d_percpu = debugfs_create_dir("per_cpu", d_tracer);

	if (!d_percpu && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'per_cpu'\n");
		return NULL;
	}

	return d_percpu;
}

static void tracing_init_debugfs_percpu(long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu();
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_file("trace_pipe", 0444, d_cpu,
			  (void *) cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_file("trace", 0644, d_cpu,
			  (void *) cpu, &tracing_fops);

	trace_create_file("trace_pipe_raw", 0444, d_cpu,
			  (void *) cpu, &tracing_buffers_fops);

	trace_create_file("stats", 0444, d_cpu,
			  (void *) cpu, &tracing_stats_fops);

	trace_create_file("buffer_size_kb", 0444, d_cpu,
			  (void *) cpu, &tracing_entries_fops);
}
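/*
 * With debugfs mounted at the usual /sys/kernel/debug, the calls
 * above give each CPU its own directory, e.g. for cpu0:
 *
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/stats
 *	/sys/kernel/debug/tracing/per_cpu/cpu0/buffer_size_kb
 */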
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(current_trace, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;
	set_tracer_flags(1 << index, val);

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(void)
{
	struct dentry *d_tracer;
	static struct dentry *t_options;

	if (t_options)
		return t_options;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	t_options = debugfs_create_dir("options", d_tracer);
	if (!t_options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return t_options;
}

static void
create_trace_option_file(struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry();
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(&topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++) {
		if (topts[cnt].entry)
			debugfs_remove(topts[cnt].entry);
	}

	kfree(topts);
}
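/*
 * For reference, a hypothetical tracer would hand this code a flag
 * table shaped like the sketch below; create_trace_option_files()
 * counts the NULL-terminated opts[] array and creates one
 * options/<name> file per entry. The names and bit values here are
 * invented for illustration only.
 *
 *	#define MY_OPT_VERBOSE		0x1
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(my_verbose, MY_OPT_VERBOSE) },
 *		{ } // terminator, required by the counting loop
 *	};
 *
 *	static struct tracer_flags my_tracer_flags = {
 *		.val = 0,
 *		.opts = my_tracer_opts,
 *	};
 */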
index) 4882 { 4883 struct dentry *t_options; 4884 4885 t_options = trace_options_init_dentry(); 4886 if (!t_options) 4887 return NULL; 4888 4889 return trace_create_file(option, 0644, t_options, (void *)index, 4890 &trace_options_core_fops); 4891 } 4892 4893 static __init void create_trace_options_dir(void) 4894 { 4895 struct dentry *t_options; 4896 int i; 4897 4898 t_options = trace_options_init_dentry(); 4899 if (!t_options) 4900 return; 4901 4902 for (i = 0; trace_options[i]; i++) 4903 create_trace_option_core_file(trace_options[i], i); 4904 } 4905 4906 static ssize_t 4907 rb_simple_read(struct file *filp, char __user *ubuf, 4908 size_t cnt, loff_t *ppos) 4909 { 4910 struct trace_array *tr = filp->private_data; 4911 struct ring_buffer *buffer = tr->buffer; 4912 char buf[64]; 4913 int r; 4914 4915 if (buffer) 4916 r = ring_buffer_record_is_on(buffer); 4917 else 4918 r = 0; 4919 4920 r = sprintf(buf, "%d\n", r); 4921 4922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4923 } 4924 4925 static ssize_t 4926 rb_simple_write(struct file *filp, const char __user *ubuf, 4927 size_t cnt, loff_t *ppos) 4928 { 4929 struct trace_array *tr = filp->private_data; 4930 struct ring_buffer *buffer = tr->buffer; 4931 unsigned long val; 4932 int ret; 4933 4934 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4935 if (ret) 4936 return ret; 4937 4938 if (buffer) { 4939 mutex_lock(&trace_types_lock); 4940 if (val) { 4941 ring_buffer_record_on(buffer); 4942 if (current_trace->start) 4943 current_trace->start(tr); 4944 } else { 4945 ring_buffer_record_off(buffer); 4946 if (current_trace->stop) 4947 current_trace->stop(tr); 4948 } 4949 mutex_unlock(&trace_types_lock); 4950 } 4951 4952 (*ppos)++; 4953 4954 return cnt; 4955 } 4956 4957 static const struct file_operations rb_simple_fops = { 4958 .open = tracing_open_generic, 4959 .read = rb_simple_read, 4960 .write = rb_simple_write, 4961 .llseek = default_llseek, 4962 }; 4963 4964 static __init int tracer_init_debugfs(void) 4965 { 4966 struct dentry *d_tracer; 4967 int cpu; 4968 4969 trace_access_lock_init(); 4970 4971 d_tracer = tracing_init_dentry(); 4972 4973 trace_create_file("trace_options", 0644, d_tracer, 4974 NULL, &tracing_iter_fops); 4975 4976 trace_create_file("tracing_cpumask", 0644, d_tracer, 4977 NULL, &tracing_cpumask_fops); 4978 4979 trace_create_file("trace", 0644, d_tracer, 4980 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); 4981 4982 trace_create_file("available_tracers", 0444, d_tracer, 4983 &global_trace, &show_traces_fops); 4984 4985 trace_create_file("current_tracer", 0644, d_tracer, 4986 &global_trace, &set_tracer_fops); 4987 4988 #ifdef CONFIG_TRACER_MAX_TRACE 4989 trace_create_file("tracing_max_latency", 0644, d_tracer, 4990 &tracing_max_latency, &tracing_max_lat_fops); 4991 #endif 4992 4993 trace_create_file("tracing_thresh", 0644, d_tracer, 4994 &tracing_thresh, &tracing_max_lat_fops); 4995 4996 trace_create_file("README", 0444, d_tracer, 4997 NULL, &tracing_readme_fops); 4998 4999 trace_create_file("trace_pipe", 0444, d_tracer, 5000 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); 5001 5002 trace_create_file("buffer_size_kb", 0644, d_tracer, 5003 (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops); 5004 5005 trace_create_file("buffer_total_size_kb", 0444, d_tracer, 5006 &global_trace, &tracing_total_entries_fops); 5007 5008 trace_create_file("free_buffer", 0644, d_tracer, 5009 &global_trace, &tracing_free_buffer_fops); 5010 5011 trace_create_file("trace_marker", 0220, d_tracer, 5012 NULL, &tracing_mark_fops); 5013 5014 
trace_create_file("saved_cmdlines", 0444, d_tracer, 5015 NULL, &tracing_saved_cmdlines_fops); 5016 5017 trace_create_file("trace_clock", 0644, d_tracer, NULL, 5018 &trace_clock_fops); 5019 5020 trace_create_file("tracing_on", 0644, d_tracer, 5021 &global_trace, &rb_simple_fops); 5022 5023 #ifdef CONFIG_DYNAMIC_FTRACE 5024 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 5025 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 5026 #endif 5027 5028 #ifdef CONFIG_TRACER_SNAPSHOT 5029 trace_create_file("snapshot", 0644, d_tracer, 5030 (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); 5031 #endif 5032 5033 create_trace_options_dir(); 5034 5035 for_each_tracing_cpu(cpu) 5036 tracing_init_debugfs_percpu(cpu); 5037 5038 return 0; 5039 } 5040 5041 static int trace_panic_handler(struct notifier_block *this, 5042 unsigned long event, void *unused) 5043 { 5044 if (ftrace_dump_on_oops) 5045 ftrace_dump(ftrace_dump_on_oops); 5046 return NOTIFY_OK; 5047 } 5048 5049 static struct notifier_block trace_panic_notifier = { 5050 .notifier_call = trace_panic_handler, 5051 .next = NULL, 5052 .priority = 150 /* priority: INT_MAX >= x >= 0 */ 5053 }; 5054 5055 static int trace_die_handler(struct notifier_block *self, 5056 unsigned long val, 5057 void *data) 5058 { 5059 switch (val) { 5060 case DIE_OOPS: 5061 if (ftrace_dump_on_oops) 5062 ftrace_dump(ftrace_dump_on_oops); 5063 break; 5064 default: 5065 break; 5066 } 5067 return NOTIFY_OK; 5068 } 5069 5070 static struct notifier_block trace_die_notifier = { 5071 .notifier_call = trace_die_handler, 5072 .priority = 200 5073 }; 5074 5075 /* 5076 * printk is set to max of 1024, we really don't need it that big. 5077 * Nothing should be printing 1000 characters anyway. 5078 */ 5079 #define TRACE_MAX_PRINT 1000 5080 5081 /* 5082 * Define here KERN_TRACE so that we have one place to modify 5083 * it if we decide to change what log level the ftrace dump 5084 * should be at. 5085 */ 5086 #define KERN_TRACE KERN_EMERG 5087 5088 void 5089 trace_printk_seq(struct trace_seq *s) 5090 { 5091 /* Probably should print a warning here. */ 5092 if (s->len >= 1000) 5093 s->len = 1000; 5094 5095 /* should be zero ended, but we are paranoid. */ 5096 s->buffer[s->len] = 0; 5097 5098 printk(KERN_TRACE "%s", s->buffer); 5099 5100 trace_seq_init(s); 5101 } 5102 5103 void trace_init_global_iter(struct trace_iterator *iter) 5104 { 5105 iter->tr = &global_trace; 5106 iter->trace = current_trace; 5107 iter->cpu_file = TRACE_PIPE_ALL_CPU; 5108 } 5109 5110 static void 5111 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) 5112 { 5113 static arch_spinlock_t ftrace_dump_lock = 5114 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 5115 /* use static because iter can be a bit big for the stack */ 5116 static struct trace_iterator iter; 5117 unsigned int old_userobj; 5118 static int dump_ran; 5119 unsigned long flags; 5120 int cnt = 0, cpu; 5121 5122 /* only one dump */ 5123 local_irq_save(flags); 5124 arch_spin_lock(&ftrace_dump_lock); 5125 if (dump_ran) 5126 goto out; 5127 5128 dump_ran = 1; 5129 5130 tracing_off(); 5131 5132 /* Did function tracer already get disabled? 
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
	static arch_spinlock_t ftrace_dump_lock =
		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	unsigned int old_userobj;
	static int dump_ran;
	unsigned long flags;
	int cnt = 0, cpu;

	/* only one dump */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_dump_lock);
	if (dump_ran)
		goto out;

	dump_ran = 1;

	tracing_off();

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	if (disable_tracing)
		ftrace_kill();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&iter.tr->data[cpu]->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = TRACE_PIPE_ALL_CPU;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/*
	 * We need to stop all tracing on all CPUs to read the
	 * next buffer. This is a bit expensive, but is not done
	 * often. We read everything we can, and then release the
	 * locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Re-enable tracing if requested */
	if (!disable_tracing) {
		trace_flags |= old_userobj;

		for_each_tracing_cpu(cpu) {
			atomic_dec(&iter.tr->data[cpu]->disabled);
		}
		tracing_on();
	}

 out:
	arch_spin_unlock(&ftrace_dump_lock);
	local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
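/*
 * Illustrative sketch (not from this file): since ftrace_dump() is
 * exported, a module can dump the trace buffers to the console on a
 * fatal error path. Note that this variant also disables tracing
 * afterwards, so it is a one-shot debugging aid. my_driver_fatal()
 * is a made-up example function.
 *
 *	#include <linux/kernel.h>
 *
 *	static void my_driver_fatal(void)
 *	{
 *		pr_emerg("mydrv: fatal state, dumping ftrace buffer\n");
 *		ftrace_dump(DUMP_ALL);
 *	}
 */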
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	enum ring_buffer_flags rb_flags;
	int i;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(tracing_cpumask, cpu_all_mask);

	/* TODO: make the number of buffers hot pluggable with CPUS */
	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
	if (!global_trace.buffer) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

#ifdef CONFIG_TRACER_MAX_TRACE
	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
	if (!max_tr.buffer) {
		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
		WARN_ON(1);
		ring_buffer_free(global_trace.buffer);
		goto out_free_cpumask;
	}
#endif

	/* Allocate the first page for all buffers */
	for_each_tracing_cpu(i) {
		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_tr_data, i);
	}

	set_buffer_entries(&global_trace,
			   ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
	set_buffer_entries(&max_tr, 1);
#endif

	trace_init_cmdlines();
	init_irq_work(&trace_work_wakeup, trace_wake_up);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(option);
	}

	return 0;

out_free_cpumask:
	free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer's name lives in an init section,
	 * and this function runs as a late initcall. If the boot
	 * tracer was never registered, clear the pointer here so
	 * that a later registration cannot access a buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);