// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing including tracers/events via kernel cmdline
 * is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif

/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will be set to zero if the initialization
 * of the tracer is successful. That is the only place that sets
 * it back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
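/*
 * Rough sketch of the layout described above (illustrative only, derived
 * from the comment; not referenced by any code):
 *
 *	item[0].head	-> { .mod = <owning module or NULL>, .length = N }
 *	item[1..N].map	-> the N saved trace_eval_map entries
 *	item[N+1].tail	-> { .next = <next saved array or NULL> }
 *
 * A walker starts at trace_eval_maps, skips the head item, consumes
 * head.length map entries, and follows tail.next to the next saved array.
 */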
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;

static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		ring_buffer_expanded = true;
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);


static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
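/*
 * Example (illustrative only): the boot parameters parsed above can be
 * combined on the kernel command line, e.g.:
 *
 *	ftrace=function_graph ftrace_dump_on_oops=orig_cpu
 *	traceoff_on_warning trace_options=sym-addr trace_clock=global
 *	alloc_snapshot trace_instance=foo
 *
 * Most of these handlers only record the request; the tracer, options,
 * clock and snapshot buffer are actually set up later during boot.
 */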
__setup("trace_clock=", set_trace_boot_clock); 302 303 static int __init set_tracepoint_printk(char *str) 304 { 305 /* Ignore the "tp_printk_stop_on_boot" param */ 306 if (*str == '_') 307 return 0; 308 309 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) 310 tracepoint_printk = 1; 311 return 1; 312 } 313 __setup("tp_printk", set_tracepoint_printk); 314 315 static int __init set_tracepoint_printk_stop(char *str) 316 { 317 tracepoint_printk_stop_on_boot = true; 318 return 1; 319 } 320 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop); 321 322 unsigned long long ns2usecs(u64 nsec) 323 { 324 nsec += 500; 325 do_div(nsec, 1000); 326 return nsec; 327 } 328 329 static void 330 trace_process_export(struct trace_export *export, 331 struct ring_buffer_event *event, int flag) 332 { 333 struct trace_entry *entry; 334 unsigned int size = 0; 335 336 if (export->flags & flag) { 337 entry = ring_buffer_event_data(event); 338 size = ring_buffer_event_length(event); 339 export->write(export, entry, size); 340 } 341 } 342 343 static DEFINE_MUTEX(ftrace_export_lock); 344 345 static struct trace_export __rcu *ftrace_exports_list __read_mostly; 346 347 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled); 348 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled); 349 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled); 350 351 static inline void ftrace_exports_enable(struct trace_export *export) 352 { 353 if (export->flags & TRACE_EXPORT_FUNCTION) 354 static_branch_inc(&trace_function_exports_enabled); 355 356 if (export->flags & TRACE_EXPORT_EVENT) 357 static_branch_inc(&trace_event_exports_enabled); 358 359 if (export->flags & TRACE_EXPORT_MARKER) 360 static_branch_inc(&trace_marker_exports_enabled); 361 } 362 363 static inline void ftrace_exports_disable(struct trace_export *export) 364 { 365 if (export->flags & TRACE_EXPORT_FUNCTION) 366 static_branch_dec(&trace_function_exports_enabled); 367 368 if (export->flags & TRACE_EXPORT_EVENT) 369 static_branch_dec(&trace_event_exports_enabled); 370 371 if (export->flags & TRACE_EXPORT_MARKER) 372 static_branch_dec(&trace_marker_exports_enabled); 373 } 374 375 static void ftrace_exports(struct ring_buffer_event *event, int flag) 376 { 377 struct trace_export *export; 378 379 preempt_disable_notrace(); 380 381 export = rcu_dereference_raw_check(ftrace_exports_list); 382 while (export) { 383 trace_process_export(export, event, flag); 384 export = rcu_dereference_raw_check(export->next); 385 } 386 387 preempt_enable_notrace(); 388 } 389 390 static inline void 391 add_trace_export(struct trace_export **list, struct trace_export *export) 392 { 393 rcu_assign_pointer(export->next, *list); 394 /* 395 * We are entering export into the list but another 396 * CPU might be walking that list. We need to make sure 397 * the export->next pointer is valid before another CPU sees 398 * the export pointer included into the list. 

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |		\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
526 * 527 */ 528 void trace_array_put(struct trace_array *this_tr) 529 { 530 if (!this_tr) 531 return; 532 533 mutex_lock(&trace_types_lock); 534 __trace_array_put(this_tr); 535 mutex_unlock(&trace_types_lock); 536 } 537 EXPORT_SYMBOL_GPL(trace_array_put); 538 539 int tracing_check_open_get_tr(struct trace_array *tr) 540 { 541 int ret; 542 543 ret = security_locked_down(LOCKDOWN_TRACEFS); 544 if (ret) 545 return ret; 546 547 if (tracing_disabled) 548 return -ENODEV; 549 550 if (tr && trace_array_get(tr) < 0) 551 return -ENODEV; 552 553 return 0; 554 } 555 556 int call_filter_check_discard(struct trace_event_call *call, void *rec, 557 struct trace_buffer *buffer, 558 struct ring_buffer_event *event) 559 { 560 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && 561 !filter_match_preds(call->filter, rec)) { 562 __trace_event_discard_commit(buffer, event); 563 return 1; 564 } 565 566 return 0; 567 } 568 569 /** 570 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list 571 * @filtered_pids: The list of pids to check 572 * @search_pid: The PID to find in @filtered_pids 573 * 574 * Returns true if @search_pid is found in @filtered_pids, and false otherwise. 575 */ 576 bool 577 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) 578 { 579 return trace_pid_list_is_set(filtered_pids, search_pid); 580 } 581 582 /** 583 * trace_ignore_this_task - should a task be ignored for tracing 584 * @filtered_pids: The list of pids to check 585 * @filtered_no_pids: The list of pids not to be traced 586 * @task: The task that should be ignored if not filtered 587 * 588 * Checks if @task should be traced or not from @filtered_pids. 589 * Returns true if @task should *NOT* be traced. 590 * Returns false if @task should be traced. 591 */ 592 bool 593 trace_ignore_this_task(struct trace_pid_list *filtered_pids, 594 struct trace_pid_list *filtered_no_pids, 595 struct task_struct *task) 596 { 597 /* 598 * If filtered_no_pids is not empty, and the task's pid is listed 599 * in filtered_no_pids, then return true. 600 * Otherwise, if filtered_pids is empty, that means we can 601 * trace all tasks. If it has content, then only trace pids 602 * within filtered_pids. 603 */ 604 605 return (filtered_pids && 606 !trace_find_filtered_pid(filtered_pids, task->pid)) || 607 (filtered_no_pids && 608 trace_find_filtered_pid(filtered_no_pids, task->pid)); 609 } 610 611 /** 612 * trace_filter_add_remove_task - Add or remove a task from a pid_list 613 * @pid_list: The list to modify 614 * @self: The current task for fork or NULL for exit 615 * @task: The task to add or remove 616 * 617 * If adding a task, if @self is defined, the task is only added if @self 618 * is also included in @pid_list. This happens on fork and tasks should 619 * only be added when the parent is listed. If @self is NULL, then the 620 * @task pid will be removed from the list, which would happen on exit 621 * of a task. 
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (the actual pid plus 1, so that zero can be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1, since we want to be able to display a pid of zero
 * and returning NULL would stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1, since we want to be able to display a pid of zero
 * and returning NULL would stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
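/*
 * Usage sketch (illustrative only; the my_* names and my_pid_list are made
 * up): these helpers slot directly into the seq_file operations of a
 * "set_*_pid" style tracefs file, roughly:
 *
 *	static void *my_pid_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *my_pid_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void my_pid_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations my_pid_sops = {
 *		.start	= my_pid_start,
 *		.next	= my_pid_next,
 *		.stop	= my_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 *
 * The pid+1 encoding is internal to start/next/show; user space only ever
 * sees the real pid values printed by trace_pid_show().
 */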
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate the array. The write is an all or nothing
	 * operation: a new array is always created when the user adds
	 * pids, and if the operation fails, the current list is not
	 * modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0)
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		if (!trace_parser_loaded(&parser))
			break;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}

static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so that it can be used in fast paths
 * such as the irqsoff tracer, but it may be inaccurate due to races.
 * If you need to know the accurate state, use tracing_is_on() which is
 * a little slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different per-CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish between read-only and read-consume
 * access. Multiple read-only accesses are also serialized.
 */
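/*
 * Usage sketch (illustrative only; "buffer", "ts" and "lost" are made-up
 * local names): a consuming reader brackets its access to a single CPU
 * buffer with:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost);
 *	trace_access_unlock(cpu);
 *
 * while a reader that needs all CPU buffers at once passes
 * RING_BUFFER_ALL_CPUS, which takes the lock exclusively and blocks the
 * per-CPU readers.
 */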

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned long trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag
is looked at when buffers haven't been allocated 1004 * yet, or by some tracers (like irqsoff), that just want to 1005 * know if the ring buffer has been disabled, but it can handle 1006 * races of where it gets disabled but we still do a record. 1007 * As the check is in the fast path of the tracers, it is more 1008 * important to be fast than accurate. 1009 */ 1010 tr->buffer_disabled = 0; 1011 /* Make the flag seen by readers */ 1012 smp_wmb(); 1013 } 1014 1015 /** 1016 * tracing_on - enable tracing buffers 1017 * 1018 * This function enables tracing buffers that may have been 1019 * disabled with tracing_off. 1020 */ 1021 void tracing_on(void) 1022 { 1023 tracer_tracing_on(&global_trace); 1024 } 1025 EXPORT_SYMBOL_GPL(tracing_on); 1026 1027 1028 static __always_inline void 1029 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) 1030 { 1031 __this_cpu_write(trace_taskinfo_save, true); 1032 1033 /* If this is the temp buffer, we need to commit fully */ 1034 if (this_cpu_read(trace_buffered_event) == event) { 1035 /* Length is in event->array[0] */ 1036 ring_buffer_write(buffer, event->array[0], &event->array[1]); 1037 /* Release the temp buffer */ 1038 this_cpu_dec(trace_buffered_event_cnt); 1039 /* ring_buffer_unlock_commit() enables preemption */ 1040 preempt_enable_notrace(); 1041 } else 1042 ring_buffer_unlock_commit(buffer); 1043 } 1044 1045 int __trace_array_puts(struct trace_array *tr, unsigned long ip, 1046 const char *str, int size) 1047 { 1048 struct ring_buffer_event *event; 1049 struct trace_buffer *buffer; 1050 struct print_entry *entry; 1051 unsigned int trace_ctx; 1052 int alloc; 1053 1054 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) 1055 return 0; 1056 1057 if (unlikely(tracing_selftest_running && tr == &global_trace)) 1058 return 0; 1059 1060 if (unlikely(tracing_disabled)) 1061 return 0; 1062 1063 alloc = sizeof(*entry) + size + 2; /* possible \n added */ 1064 1065 trace_ctx = tracing_gen_ctx(); 1066 buffer = tr->array_buffer.buffer; 1067 ring_buffer_nest_start(buffer); 1068 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 1069 trace_ctx); 1070 if (!event) { 1071 size = 0; 1072 goto out; 1073 } 1074 1075 entry = ring_buffer_event_data(event); 1076 entry->ip = ip; 1077 1078 memcpy(&entry->buf, str, size); 1079 1080 /* Add a newline if necessary */ 1081 if (entry->buf[size - 1] != '\n') { 1082 entry->buf[size] = '\n'; 1083 entry->buf[size + 1] = '\0'; 1084 } else 1085 entry->buf[size] = '\0'; 1086 1087 __buffer_unlock_commit(buffer, event); 1088 ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); 1089 out: 1090 ring_buffer_nest_end(buffer); 1091 return size; 1092 } 1093 EXPORT_SYMBOL_GPL(__trace_array_puts); 1094 1095 /** 1096 * __trace_puts - write a constant string into the trace buffer. 1097 * @ip: The address of the caller 1098 * @str: The constant string to write 1099 * @size: The size of the string. 
1100 */ 1101 int __trace_puts(unsigned long ip, const char *str, int size) 1102 { 1103 return __trace_array_puts(&global_trace, ip, str, size); 1104 } 1105 EXPORT_SYMBOL_GPL(__trace_puts); 1106 1107 /** 1108 * __trace_bputs - write the pointer to a constant string into trace buffer 1109 * @ip: The address of the caller 1110 * @str: The constant string to write to the buffer to 1111 */ 1112 int __trace_bputs(unsigned long ip, const char *str) 1113 { 1114 struct ring_buffer_event *event; 1115 struct trace_buffer *buffer; 1116 struct bputs_entry *entry; 1117 unsigned int trace_ctx; 1118 int size = sizeof(struct bputs_entry); 1119 int ret = 0; 1120 1121 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 1122 return 0; 1123 1124 if (unlikely(tracing_selftest_running || tracing_disabled)) 1125 return 0; 1126 1127 trace_ctx = tracing_gen_ctx(); 1128 buffer = global_trace.array_buffer.buffer; 1129 1130 ring_buffer_nest_start(buffer); 1131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 1132 trace_ctx); 1133 if (!event) 1134 goto out; 1135 1136 entry = ring_buffer_event_data(event); 1137 entry->ip = ip; 1138 entry->str = str; 1139 1140 __buffer_unlock_commit(buffer, event); 1141 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL); 1142 1143 ret = 1; 1144 out: 1145 ring_buffer_nest_end(buffer); 1146 return ret; 1147 } 1148 EXPORT_SYMBOL_GPL(__trace_bputs); 1149 1150 #ifdef CONFIG_TRACER_SNAPSHOT 1151 static void tracing_snapshot_instance_cond(struct trace_array *tr, 1152 void *cond_data) 1153 { 1154 struct tracer *tracer = tr->current_trace; 1155 unsigned long flags; 1156 1157 if (in_nmi()) { 1158 trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); 1159 trace_array_puts(tr, "*** snapshot is being ignored ***\n"); 1160 return; 1161 } 1162 1163 if (!tr->allocated_snapshot) { 1164 trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); 1165 trace_array_puts(tr, "*** stopping trace here! ***\n"); 1166 tracer_tracing_off(tr); 1167 return; 1168 } 1169 1170 /* Note, snapshot can not be used when the tracer uses it */ 1171 if (tracer->use_max_tr) { 1172 trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); 1173 trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); 1174 return; 1175 } 1176 1177 local_irq_save(flags); 1178 update_max_tr(tr, current, smp_processor_id(), cond_data); 1179 local_irq_restore(flags); 1180 } 1181 1182 void tracing_snapshot_instance(struct trace_array *tr) 1183 { 1184 tracing_snapshot_instance_cond(tr, NULL); 1185 } 1186 1187 /** 1188 * tracing_snapshot - take a snapshot of the current buffer. 1189 * 1190 * This causes a swap between the snapshot buffer and the current live 1191 * tracing buffer. You can use this to take snapshots of the live 1192 * trace when some condition is triggered, but continue to trace. 1193 * 1194 * Note, make sure to allocate the snapshot with either 1195 * a tracing_snapshot_alloc(), or by doing it manually 1196 * with: echo 1 > /sys/kernel/tracing/snapshot 1197 * 1198 * If the snapshot buffer is not allocated, it will stop tracing. 1199 * Basically making a permanent snapshot. 1200 */ 1201 void tracing_snapshot(void) 1202 { 1203 struct trace_array *tr = &global_trace; 1204 1205 tracing_snapshot_instance(tr); 1206 } 1207 EXPORT_SYMBOL_GPL(tracing_snapshot); 1208 1209 /** 1210 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1318 * 1319 * This is similar to tracing_snapshot(), but it will allocate the 1320 * snapshot buffer if it isn't already allocated. Use this only 1321 * where it is safe to sleep, as the allocation may sleep. 1322 * 1323 * This causes a swap between the snapshot buffer and the current live 1324 * tracing buffer. You can use this to take snapshots of the live 1325 * trace when some condition is triggered, but continue to trace. 1326 */ 1327 void tracing_snapshot_alloc(void) 1328 { 1329 int ret; 1330 1331 ret = tracing_alloc_snapshot(); 1332 if (ret < 0) 1333 return; 1334 1335 tracing_snapshot(); 1336 } 1337 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 1338 1339 /** 1340 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance 1341 * @tr: The tracing instance 1342 * @cond_data: User data to associate with the snapshot 1343 * @update: Implementation of the cond_snapshot update function 1344 * 1345 * Check whether the conditional snapshot for the given instance has 1346 * already been enabled, or if the current tracer is already using a 1347 * snapshot; if so, return -EBUSY, else create a cond_snapshot and 1348 * save the cond_data and update function inside. 1349 * 1350 * Returns 0 if successful, error otherwise. 1351 */ 1352 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, 1353 cond_update_fn_t update) 1354 { 1355 struct cond_snapshot *cond_snapshot; 1356 int ret = 0; 1357 1358 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); 1359 if (!cond_snapshot) 1360 return -ENOMEM; 1361 1362 cond_snapshot->cond_data = cond_data; 1363 cond_snapshot->update = update; 1364 1365 mutex_lock(&trace_types_lock); 1366 1367 ret = tracing_alloc_snapshot_instance(tr); 1368 if (ret) 1369 goto fail_unlock; 1370 1371 if (tr->current_trace->use_max_tr) { 1372 ret = -EBUSY; 1373 goto fail_unlock; 1374 } 1375 1376 /* 1377 * The cond_snapshot can only change to NULL without the 1378 * trace_types_lock. We don't care if we race with it going 1379 * to NULL, but we want to make sure that it's not set to 1380 * something other than NULL when we get here, which we can 1381 * do safely with only holding the trace_types_lock and not 1382 * having to take the max_lock. 1383 */ 1384 if (tr->cond_snapshot) { 1385 ret = -EBUSY; 1386 goto fail_unlock; 1387 } 1388 1389 local_irq_disable(); 1390 arch_spin_lock(&tr->max_lock); 1391 tr->cond_snapshot = cond_snapshot; 1392 arch_spin_unlock(&tr->max_lock); 1393 local_irq_enable(); 1394 1395 mutex_unlock(&trace_types_lock); 1396 1397 return ret; 1398 1399 fail_unlock: 1400 mutex_unlock(&trace_types_lock); 1401 kfree(cond_snapshot); 1402 return ret; 1403 } 1404 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 1405 1406 /** 1407 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance 1408 * @tr: The tracing instance 1409 * 1410 * Check whether the conditional snapshot for the given instance is 1411 * enabled; if so, free the cond_snapshot associated with it, 1412 * otherwise return -EINVAL. 1413 * 1414 * Returns 0 if successful, error otherwise. 
1415 */ 1416 int tracing_snapshot_cond_disable(struct trace_array *tr) 1417 { 1418 int ret = 0; 1419 1420 local_irq_disable(); 1421 arch_spin_lock(&tr->max_lock); 1422 1423 if (!tr->cond_snapshot) 1424 ret = -EINVAL; 1425 else { 1426 kfree(tr->cond_snapshot); 1427 tr->cond_snapshot = NULL; 1428 } 1429 1430 arch_spin_unlock(&tr->max_lock); 1431 local_irq_enable(); 1432 1433 return ret; 1434 } 1435 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 1436 #else 1437 void tracing_snapshot(void) 1438 { 1439 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); 1440 } 1441 EXPORT_SYMBOL_GPL(tracing_snapshot); 1442 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) 1443 { 1444 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); 1445 } 1446 EXPORT_SYMBOL_GPL(tracing_snapshot_cond); 1447 int tracing_alloc_snapshot(void) 1448 { 1449 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); 1450 return -ENODEV; 1451 } 1452 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 1453 void tracing_snapshot_alloc(void) 1454 { 1455 /* Give warning */ 1456 tracing_snapshot(); 1457 } 1458 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); 1459 void *tracing_cond_snapshot_data(struct trace_array *tr) 1460 { 1461 return NULL; 1462 } 1463 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); 1464 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) 1465 { 1466 return -ENODEV; 1467 } 1468 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); 1469 int tracing_snapshot_cond_disable(struct trace_array *tr) 1470 { 1471 return false; 1472 } 1473 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); 1474 #define free_snapshot(tr) do { } while (0) 1475 #endif /* CONFIG_TRACER_SNAPSHOT */ 1476 1477 void tracer_tracing_off(struct trace_array *tr) 1478 { 1479 if (tr->array_buffer.buffer) 1480 ring_buffer_record_off(tr->array_buffer.buffer); 1481 /* 1482 * This flag is looked at when buffers haven't been allocated 1483 * yet, or by some tracers (like irqsoff), that just want to 1484 * know if the ring buffer has been disabled, but it can handle 1485 * races of where it gets disabled but we still do a record. 1486 * As the check is in the fast path of the tracers, it is more 1487 * important to be fast than accurate. 1488 */ 1489 tr->buffer_disabled = 1; 1490 /* Make the flag seen by readers */ 1491 smp_wmb(); 1492 } 1493 1494 /** 1495 * tracing_off - turn off tracing buffers 1496 * 1497 * This function stops the tracing buffers from recording data. 1498 * It does not disable any overhead the tracers themselves may 1499 * be causing. This function simply causes all recording to 1500 * the ring buffers to fail. 1501 */ 1502 void tracing_off(void) 1503 { 1504 tracer_tracing_off(&global_trace); 1505 } 1506 EXPORT_SYMBOL_GPL(tracing_off); 1507 1508 void disable_trace_on_warning(void) 1509 { 1510 if (__disable_trace_on_warning) { 1511 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, 1512 "Disabling tracing due to warning\n"); 1513 tracing_off(); 1514 } 1515 } 1516 1517 /** 1518 * tracer_tracing_is_on - show real state of ring buffer enabled 1519 * @tr : the trace array to know if ring buffer is enabled 1520 * 1521 * Shows real state of the ring buffer if it is enabled or not. 
1522 */ 1523 bool tracer_tracing_is_on(struct trace_array *tr) 1524 { 1525 if (tr->array_buffer.buffer) 1526 return ring_buffer_record_is_on(tr->array_buffer.buffer); 1527 return !tr->buffer_disabled; 1528 } 1529 1530 /** 1531 * tracing_is_on - show state of ring buffers enabled 1532 */ 1533 int tracing_is_on(void) 1534 { 1535 return tracer_tracing_is_on(&global_trace); 1536 } 1537 EXPORT_SYMBOL_GPL(tracing_is_on); 1538 1539 static int __init set_buf_size(char *str) 1540 { 1541 unsigned long buf_size; 1542 1543 if (!str) 1544 return 0; 1545 buf_size = memparse(str, &str); 1546 /* 1547 * nr_entries can not be zero and the startup 1548 * tests require some buffer space. Therefore 1549 * ensure we have at least 4096 bytes of buffer. 1550 */ 1551 trace_buf_size = max(4096UL, buf_size); 1552 return 1; 1553 } 1554 __setup("trace_buf_size=", set_buf_size); 1555 1556 static int __init set_tracing_thresh(char *str) 1557 { 1558 unsigned long threshold; 1559 int ret; 1560 1561 if (!str) 1562 return 0; 1563 ret = kstrtoul(str, 0, &threshold); 1564 if (ret < 0) 1565 return 0; 1566 tracing_thresh = threshold * 1000; 1567 return 1; 1568 } 1569 __setup("tracing_thresh=", set_tracing_thresh); 1570 1571 unsigned long nsecs_to_usecs(unsigned long nsecs) 1572 { 1573 return nsecs / 1000; 1574 } 1575 1576 /* 1577 * TRACE_FLAGS is defined as a tuple matching bit masks with strings. 1578 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that 1579 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list 1580 * of strings in the order that the evals (enum) were defined. 1581 */ 1582 #undef C 1583 #define C(a, b) b 1584 1585 /* These must match the bit positions in trace_iterator_flags */ 1586 static const char *trace_options[] = { 1587 TRACE_FLAGS 1588 NULL 1589 }; 1590 1591 static struct { 1592 u64 (*func)(void); 1593 const char *name; 1594 int in_ns; /* is this clock in nanoseconds? */ 1595 } trace_clocks[] = { 1596 { trace_clock_local, "local", 1 }, 1597 { trace_clock_global, "global", 1 }, 1598 { trace_clock_counter, "counter", 0 }, 1599 { trace_clock_jiffies, "uptime", 0 }, 1600 { trace_clock, "perf", 1 }, 1601 { ktime_get_mono_fast_ns, "mono", 1 }, 1602 { ktime_get_raw_fast_ns, "mono_raw", 1 }, 1603 { ktime_get_boot_fast_ns, "boot", 1 }, 1604 { ktime_get_tai_fast_ns, "tai", 1 }, 1605 ARCH_TRACE_CLOCKS 1606 }; 1607 1608 bool trace_clock_in_ns(struct trace_array *tr) 1609 { 1610 if (trace_clocks[tr->clock_id].in_ns) 1611 return true; 1612 1613 return false; 1614 } 1615 1616 /* 1617 * trace_parser_get_init - gets the buffer for trace parser 1618 */ 1619 int trace_parser_get_init(struct trace_parser *parser, int size) 1620 { 1621 memset(parser, 0, sizeof(*parser)); 1622 1623 parser->buffer = kmalloc(size, GFP_KERNEL); 1624 if (!parser->buffer) 1625 return 1; 1626 1627 parser->size = size; 1628 return 0; 1629 } 1630 1631 /* 1632 * trace_parser_put - frees the buffer for trace parser 1633 */ 1634 void trace_parser_put(struct trace_parser *parser) 1635 { 1636 kfree(parser->buffer); 1637 parser->buffer = NULL; 1638 } 1639 1640 /* 1641 * trace_get_user - reads the user input string separated by space 1642 * (matched by isspace(ch)) 1643 * 1644 * For each string found the 'struct trace_parser' is updated, 1645 * and the function returns. 1646 * 1647 * Returns number of bytes read. 1648 * 1649 * See kernel/trace/trace.h for 'struct trace_parser' details. 
1650 */ 1651 int trace_get_user(struct trace_parser *parser, const char __user *ubuf, 1652 size_t cnt, loff_t *ppos) 1653 { 1654 char ch; 1655 size_t read = 0; 1656 ssize_t ret; 1657 1658 if (!*ppos) 1659 trace_parser_clear(parser); 1660 1661 ret = get_user(ch, ubuf++); 1662 if (ret) 1663 goto out; 1664 1665 read++; 1666 cnt--; 1667 1668 /* 1669 * The parser is not finished with the last write, 1670 * continue reading the user input without skipping spaces. 1671 */ 1672 if (!parser->cont) { 1673 /* skip white space */ 1674 while (cnt && isspace(ch)) { 1675 ret = get_user(ch, ubuf++); 1676 if (ret) 1677 goto out; 1678 read++; 1679 cnt--; 1680 } 1681 1682 parser->idx = 0; 1683 1684 /* only spaces were written */ 1685 if (isspace(ch) || !ch) { 1686 *ppos += read; 1687 ret = read; 1688 goto out; 1689 } 1690 } 1691 1692 /* read the non-space input */ 1693 while (cnt && !isspace(ch) && ch) { 1694 if (parser->idx < parser->size - 1) 1695 parser->buffer[parser->idx++] = ch; 1696 else { 1697 ret = -EINVAL; 1698 goto out; 1699 } 1700 ret = get_user(ch, ubuf++); 1701 if (ret) 1702 goto out; 1703 read++; 1704 cnt--; 1705 } 1706 1707 /* We either got finished input or we have to wait for another call. */ 1708 if (isspace(ch) || !ch) { 1709 parser->buffer[parser->idx] = 0; 1710 parser->cont = false; 1711 } else if (parser->idx < parser->size - 1) { 1712 parser->cont = true; 1713 parser->buffer[parser->idx++] = ch; 1714 /* Make sure the parsed string always terminates with '\0'. */ 1715 parser->buffer[parser->idx] = 0; 1716 } else { 1717 ret = -EINVAL; 1718 goto out; 1719 } 1720 1721 *ppos += read; 1722 ret = read; 1723 1724 out: 1725 return ret; 1726 } 1727 1728 /* TODO add a seq_buf_to_buffer() */ 1729 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) 1730 { 1731 int len; 1732 1733 if (trace_seq_used(s) <= s->seq.readpos) 1734 return -EBUSY; 1735 1736 len = trace_seq_used(s) - s->seq.readpos; 1737 if (cnt > len) 1738 cnt = len; 1739 memcpy(buf, s->buffer + s->seq.readpos, cnt); 1740 1741 s->seq.readpos += cnt; 1742 return cnt; 1743 } 1744 1745 unsigned long __read_mostly tracing_thresh; 1746 1747 #ifdef CONFIG_TRACER_MAX_TRACE 1748 static const struct file_operations tracing_max_lat_fops; 1749 1750 #ifdef LATENCY_FS_NOTIFY 1751 1752 static struct workqueue_struct *fsnotify_wq; 1753 1754 static void latency_fsnotify_workfn(struct work_struct *work) 1755 { 1756 struct trace_array *tr = container_of(work, struct trace_array, 1757 fsnotify_work); 1758 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); 1759 } 1760 1761 static void latency_fsnotify_workfn_irq(struct irq_work *iwork) 1762 { 1763 struct trace_array *tr = container_of(iwork, struct trace_array, 1764 fsnotify_irqwork); 1765 queue_work(fsnotify_wq, &tr->fsnotify_work); 1766 } 1767 1768 static void trace_create_maxlat_file(struct trace_array *tr, 1769 struct dentry *d_tracer) 1770 { 1771 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); 1772 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); 1773 tr->d_max_latency = trace_create_file("tracing_max_latency", 1774 TRACE_MODE_WRITE, 1775 d_tracer, &tr->max_latency, 1776 &tracing_max_lat_fops); 1777 } 1778 1779 __init static int latency_fsnotify_init(void) 1780 { 1781 fsnotify_wq = alloc_workqueue("tr_max_lat_wq", 1782 WQ_UNBOUND | WQ_HIGHPRI, 0); 1783 if (!fsnotify_wq) { 1784 pr_err("Unable to allocate tr_max_lat_wq\n"); 1785 return -ENOMEM; 1786 } 1787 return 0; 1788 } 1789 1790 late_initcall_sync(latency_fsnotify_init); 1791 1792 void 
latency_fsnotify(struct trace_array *tr) 1793 { 1794 if (!fsnotify_wq) 1795 return; 1796 /* 1797 * We cannot call queue_work(&tr->fsnotify_work) from here because it's 1798 * possible that we are called from __schedule() or do_idle(), which 1799 * could cause a deadlock. 1800 */ 1801 irq_work_queue(&tr->fsnotify_irqwork); 1802 } 1803 1804 #else /* !LATENCY_FS_NOTIFY */ 1805 1806 #define trace_create_maxlat_file(tr, d_tracer) \ 1807 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ 1808 d_tracer, &tr->max_latency, &tracing_max_lat_fops) 1809 1810 #endif 1811 1812 /* 1813 * Copy the new maximum trace into the separate maximum-trace 1814 * structure. (this way the maximum trace is permanently saved, 1815 * for later retrieval via /sys/kernel/tracing/tracing_max_latency) 1816 */ 1817 static void 1818 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 1819 { 1820 struct array_buffer *trace_buf = &tr->array_buffer; 1821 struct array_buffer *max_buf = &tr->max_buffer; 1822 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); 1823 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); 1824 1825 max_buf->cpu = cpu; 1826 max_buf->time_start = data->preempt_timestamp; 1827 1828 max_data->saved_latency = tr->max_latency; 1829 max_data->critical_start = data->critical_start; 1830 max_data->critical_end = data->critical_end; 1831 1832 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN); 1833 max_data->pid = tsk->pid; 1834 /* 1835 * If tsk == current, then use current_uid(), as that does not use 1836 * RCU. The irq tracer can be called out of RCU scope. 1837 */ 1838 if (tsk == current) 1839 max_data->uid = current_uid(); 1840 else 1841 max_data->uid = task_uid(tsk); 1842 1843 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; 1844 max_data->policy = tsk->policy; 1845 max_data->rt_priority = tsk->rt_priority; 1846 1847 /* record this tasks comm */ 1848 tracing_record_cmdline(tsk); 1849 latency_fsnotify(tr); 1850 } 1851 1852 /** 1853 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 1854 * @tr: tracer 1855 * @tsk: the task with the latency 1856 * @cpu: The cpu that initiated the trace. 1857 * @cond_data: User data associated with a conditional snapshot 1858 * 1859 * Flip the buffers between the @tr and the max_tr and record information 1860 * about which task was the cause of this latency. 
1861 */ 1862 void 1863 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, 1864 void *cond_data) 1865 { 1866 if (tr->stop_count) 1867 return; 1868 1869 WARN_ON_ONCE(!irqs_disabled()); 1870 1871 if (!tr->allocated_snapshot) { 1872 /* Only the nop tracer should hit this when disabling */ 1873 WARN_ON_ONCE(tr->current_trace != &nop_trace); 1874 return; 1875 } 1876 1877 arch_spin_lock(&tr->max_lock); 1878 1879 /* Inherit the recordable setting from array_buffer */ 1880 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) 1881 ring_buffer_record_on(tr->max_buffer.buffer); 1882 else 1883 ring_buffer_record_off(tr->max_buffer.buffer); 1884 1885 #ifdef CONFIG_TRACER_SNAPSHOT 1886 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { 1887 arch_spin_unlock(&tr->max_lock); 1888 return; 1889 } 1890 #endif 1891 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); 1892 1893 __update_max_tr(tr, tsk, cpu); 1894 1895 arch_spin_unlock(&tr->max_lock); 1896 } 1897 1898 /** 1899 * update_max_tr_single - only copy one trace over, and reset the rest 1900 * @tr: tracer 1901 * @tsk: task with the latency 1902 * @cpu: the cpu of the buffer to copy. 1903 * 1904 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1905 */ 1906 void 1907 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 1908 { 1909 int ret; 1910 1911 if (tr->stop_count) 1912 return; 1913 1914 WARN_ON_ONCE(!irqs_disabled()); 1915 if (!tr->allocated_snapshot) { 1916 /* Only the nop tracer should hit this when disabling */ 1917 WARN_ON_ONCE(tr->current_trace != &nop_trace); 1918 return; 1919 } 1920 1921 arch_spin_lock(&tr->max_lock); 1922 1923 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); 1924 1925 if (ret == -EBUSY) { 1926 /* 1927 * We failed to swap the buffer due to a commit taking 1928 * place on this CPU. We fail to record, but we reset 1929 * the max trace buffer (no one writes directly to it) 1930 * and flag that it failed. 1931 * Another reason is resize is in progress. 
1932 */ 1933 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, 1934 "Failed to swap buffers due to commit or resize in progress\n"); 1935 } 1936 1937 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 1938 1939 __update_max_tr(tr, tsk, cpu); 1940 arch_spin_unlock(&tr->max_lock); 1941 } 1942 1943 #endif /* CONFIG_TRACER_MAX_TRACE */ 1944 1945 static int wait_on_pipe(struct trace_iterator *iter, int full) 1946 { 1947 /* Iterators are static, they should be filled or empty */ 1948 if (trace_buffer_iter(iter, iter->cpu_file)) 1949 return 0; 1950 1951 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, 1952 full); 1953 } 1954 1955 #ifdef CONFIG_FTRACE_STARTUP_TEST 1956 static bool selftests_can_run; 1957 1958 struct trace_selftests { 1959 struct list_head list; 1960 struct tracer *type; 1961 }; 1962 1963 static LIST_HEAD(postponed_selftests); 1964 1965 static int save_selftest(struct tracer *type) 1966 { 1967 struct trace_selftests *selftest; 1968 1969 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); 1970 if (!selftest) 1971 return -ENOMEM; 1972 1973 selftest->type = type; 1974 list_add(&selftest->list, &postponed_selftests); 1975 return 0; 1976 } 1977 1978 static int run_tracer_selftest(struct tracer *type) 1979 { 1980 struct trace_array *tr = &global_trace; 1981 struct tracer *saved_tracer = tr->current_trace; 1982 int ret; 1983 1984 if (!type->selftest || tracing_selftest_disabled) 1985 return 0; 1986 1987 /* 1988 * If a tracer registers early in boot up (before scheduling is 1989 * initialized and such), then do not run its selftests yet. 1990 * Instead, run it a little later in the boot process. 1991 */ 1992 if (!selftests_can_run) 1993 return save_selftest(type); 1994 1995 if (!tracing_is_on()) { 1996 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n", 1997 type->name); 1998 return 0; 1999 } 2000 2001 /* 2002 * Run a selftest on this tracer. 2003 * Here we reset the trace buffer, and set the current 2004 * tracer to be this tracer. The tracer can then run some 2005 * internal tracing to verify that everything is in order. 2006 * If we fail, we do not register this tracer. 
2007 */ 2008 tracing_reset_online_cpus(&tr->array_buffer); 2009 2010 tr->current_trace = type; 2011 2012 #ifdef CONFIG_TRACER_MAX_TRACE 2013 if (type->use_max_tr) { 2014 /* If we expanded the buffers, make sure the max is expanded too */ 2015 if (ring_buffer_expanded) 2016 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, 2017 RING_BUFFER_ALL_CPUS); 2018 tr->allocated_snapshot = true; 2019 } 2020 #endif 2021 2022 /* the test is responsible for initializing and enabling */ 2023 pr_info("Testing tracer %s: ", type->name); 2024 ret = type->selftest(type, tr); 2025 /* the test is responsible for resetting too */ 2026 tr->current_trace = saved_tracer; 2027 if (ret) { 2028 printk(KERN_CONT "FAILED!\n"); 2029 /* Add the warning after printing 'FAILED' */ 2030 WARN_ON(1); 2031 return -1; 2032 } 2033 /* Only reset on passing, to avoid touching corrupted buffers */ 2034 tracing_reset_online_cpus(&tr->array_buffer); 2035 2036 #ifdef CONFIG_TRACER_MAX_TRACE 2037 if (type->use_max_tr) { 2038 tr->allocated_snapshot = false; 2039 2040 /* Shrink the max buffer again */ 2041 if (ring_buffer_expanded) 2042 ring_buffer_resize(tr->max_buffer.buffer, 1, 2043 RING_BUFFER_ALL_CPUS); 2044 } 2045 #endif 2046 2047 printk(KERN_CONT "PASSED\n"); 2048 return 0; 2049 } 2050 2051 static int do_run_tracer_selftest(struct tracer *type) 2052 { 2053 int ret; 2054 2055 /* 2056 * Tests can take a long time, especially if they are run one after the 2057 * other, as does happen during bootup when all the tracers are 2058 * registered. This could cause the soft lockup watchdog to trigger. 2059 */ 2060 cond_resched(); 2061 2062 tracing_selftest_running = true; 2063 ret = run_tracer_selftest(type); 2064 tracing_selftest_running = false; 2065 2066 return ret; 2067 } 2068 2069 static __init int init_trace_selftests(void) 2070 { 2071 struct trace_selftests *p, *n; 2072 struct tracer *t, **last; 2073 int ret; 2074 2075 selftests_can_run = true; 2076 2077 mutex_lock(&trace_types_lock); 2078 2079 if (list_empty(&postponed_selftests)) 2080 goto out; 2081 2082 pr_info("Running postponed tracer tests:\n"); 2083 2084 tracing_selftest_running = true; 2085 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 2086 /* This loop can take minutes when sanitizers are enabled, so 2087 * lets make sure we allow RCU processing. 2088 */ 2089 cond_resched(); 2090 ret = run_tracer_selftest(p->type); 2091 /* If the test fails, then warn and remove from available_tracers */ 2092 if (ret < 0) { 2093 WARN(1, "tracer: %s failed selftest, disabling\n", 2094 p->type->name); 2095 last = &trace_types; 2096 for (t = trace_types; t; t = t->next) { 2097 if (t == p->type) { 2098 *last = t->next; 2099 break; 2100 } 2101 last = &t->next; 2102 } 2103 } 2104 list_del(&p->list); 2105 kfree(p); 2106 } 2107 tracing_selftest_running = false; 2108 2109 out: 2110 mutex_unlock(&trace_types_lock); 2111 2112 return 0; 2113 } 2114 core_initcall(init_trace_selftests); 2115 #else 2116 static inline int run_tracer_selftest(struct tracer *type) 2117 { 2118 return 0; 2119 } 2120 static inline int do_run_tracer_selftest(struct tracer *type) 2121 { 2122 return 0; 2123 } 2124 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 2125 2126 static void add_tracer_options(struct trace_array *tr, struct tracer *t); 2127 2128 static void __init apply_trace_boot_options(void); 2129 2130 /** 2131 * register_tracer - register a tracer with the ftrace system. 2132 * @type: the plugin for the tracer 2133 * 2134 * Register a new plugin tracer. 
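 *
 * Returns 0 on success, nonzero on failure.  As the function is
 * marked __init, only built-in tracers can register, and only during
 * boot.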
2135 */ 2136 int __init register_tracer(struct tracer *type) 2137 { 2138 struct tracer *t; 2139 int ret = 0; 2140 2141 if (!type->name) { 2142 pr_info("Tracer must have a name\n"); 2143 return -1; 2144 } 2145 2146 if (strlen(type->name) >= MAX_TRACER_SIZE) { 2147 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); 2148 return -1; 2149 } 2150 2151 if (security_locked_down(LOCKDOWN_TRACEFS)) { 2152 pr_warn("Can not register tracer %s due to lockdown\n", 2153 type->name); 2154 return -EPERM; 2155 } 2156 2157 mutex_lock(&trace_types_lock); 2158 2159 for (t = trace_types; t; t = t->next) { 2160 if (strcmp(type->name, t->name) == 0) { 2161 /* already found */ 2162 pr_info("Tracer %s already registered\n", 2163 type->name); 2164 ret = -1; 2165 goto out; 2166 } 2167 } 2168 2169 if (!type->set_flag) 2170 type->set_flag = &dummy_set_flag; 2171 if (!type->flags) { 2172 /*allocate a dummy tracer_flags*/ 2173 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); 2174 if (!type->flags) { 2175 ret = -ENOMEM; 2176 goto out; 2177 } 2178 type->flags->val = 0; 2179 type->flags->opts = dummy_tracer_opt; 2180 } else 2181 if (!type->flags->opts) 2182 type->flags->opts = dummy_tracer_opt; 2183 2184 /* store the tracer for __set_tracer_option */ 2185 type->flags->trace = type; 2186 2187 ret = do_run_tracer_selftest(type); 2188 if (ret < 0) 2189 goto out; 2190 2191 type->next = trace_types; 2192 trace_types = type; 2193 add_tracer_options(&global_trace, type); 2194 2195 out: 2196 mutex_unlock(&trace_types_lock); 2197 2198 if (ret || !default_bootup_tracer) 2199 goto out_unlock; 2200 2201 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) 2202 goto out_unlock; 2203 2204 printk(KERN_INFO "Starting tracer '%s'\n", type->name); 2205 /* Do we want this tracer to start on bootup? */ 2206 tracing_set_tracer(&global_trace, type->name); 2207 default_bootup_tracer = NULL; 2208 2209 apply_trace_boot_options(); 2210 2211 /* disable other selftests, since this will break it. 
*/ 2212 disable_tracing_selftest("running a tracer"); 2213 2214 out_unlock: 2215 return ret; 2216 } 2217 2218 static void tracing_reset_cpu(struct array_buffer *buf, int cpu) 2219 { 2220 struct trace_buffer *buffer = buf->buffer; 2221 2222 if (!buffer) 2223 return; 2224 2225 ring_buffer_record_disable(buffer); 2226 2227 /* Make sure all commits have finished */ 2228 synchronize_rcu(); 2229 ring_buffer_reset_cpu(buffer, cpu); 2230 2231 ring_buffer_record_enable(buffer); 2232 } 2233 2234 void tracing_reset_online_cpus(struct array_buffer *buf) 2235 { 2236 struct trace_buffer *buffer = buf->buffer; 2237 2238 if (!buffer) 2239 return; 2240 2241 ring_buffer_record_disable(buffer); 2242 2243 /* Make sure all commits have finished */ 2244 synchronize_rcu(); 2245 2246 buf->time_start = buffer_ftrace_now(buf, buf->cpu); 2247 2248 ring_buffer_reset_online_cpus(buffer); 2249 2250 ring_buffer_record_enable(buffer); 2251 } 2252 2253 /* Must have trace_types_lock held */ 2254 void tracing_reset_all_online_cpus_unlocked(void) 2255 { 2256 struct trace_array *tr; 2257 2258 lockdep_assert_held(&trace_types_lock); 2259 2260 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2261 if (!tr->clear_trace) 2262 continue; 2263 tr->clear_trace = false; 2264 tracing_reset_online_cpus(&tr->array_buffer); 2265 #ifdef CONFIG_TRACER_MAX_TRACE 2266 tracing_reset_online_cpus(&tr->max_buffer); 2267 #endif 2268 } 2269 } 2270 2271 void tracing_reset_all_online_cpus(void) 2272 { 2273 mutex_lock(&trace_types_lock); 2274 tracing_reset_all_online_cpus_unlocked(); 2275 mutex_unlock(&trace_types_lock); 2276 } 2277 2278 /* 2279 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i 2280 * is the tgid last observed corresponding to pid=i. 2281 */ 2282 static int *tgid_map; 2283 2284 /* The maximum valid index into tgid_map. */ 2285 static size_t tgid_map_max; 2286 2287 #define SAVED_CMDLINES_DEFAULT 128 2288 #define NO_CMDLINE_MAP UINT_MAX 2289 /* 2290 * Preemption must be disabled before acquiring trace_cmdline_lock. 2291 * The various trace_arrays' max_lock must be acquired in a context 2292 * where interrupt is disabled. 
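 *
 * A rough sketch of the pid -> comm lookup implemented by the
 * saved_cmdlines structures below (illustrative, not literal code):
 *
 *   idx  = map_pid_to_cmdline[pid & (PID_MAX_DEFAULT - 1)];
 *   comm = &saved_cmdlines[idx * TASK_COMM_LEN];
 *   valid only if map_cmdline_to_pid[idx] == pid
 *
 * The reverse map (map_cmdline_to_pid) is needed because the comm
 * slots form a small ring that is recycled, so a hit in
 * map_pid_to_cmdline[] must be re-checked against the owning pid.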
2293 */ 2294 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; 2295 struct saved_cmdlines_buffer { 2296 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; 2297 unsigned *map_cmdline_to_pid; 2298 unsigned cmdline_num; 2299 int cmdline_idx; 2300 char *saved_cmdlines; 2301 }; 2302 static struct saved_cmdlines_buffer *savedcmd; 2303 2304 static inline char *get_saved_cmdlines(int idx) 2305 { 2306 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN]; 2307 } 2308 2309 static inline void set_cmdline(int idx, const char *cmdline) 2310 { 2311 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); 2312 } 2313 2314 static int allocate_cmdlines_buffer(unsigned int val, 2315 struct saved_cmdlines_buffer *s) 2316 { 2317 s->map_cmdline_to_pid = kmalloc_array(val, 2318 sizeof(*s->map_cmdline_to_pid), 2319 GFP_KERNEL); 2320 if (!s->map_cmdline_to_pid) 2321 return -ENOMEM; 2322 2323 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); 2324 if (!s->saved_cmdlines) { 2325 kfree(s->map_cmdline_to_pid); 2326 return -ENOMEM; 2327 } 2328 2329 s->cmdline_idx = 0; 2330 s->cmdline_num = val; 2331 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, 2332 sizeof(s->map_pid_to_cmdline)); 2333 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, 2334 val * sizeof(*s->map_cmdline_to_pid)); 2335 2336 return 0; 2337 } 2338 2339 static int trace_create_savedcmd(void) 2340 { 2341 int ret; 2342 2343 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); 2344 if (!savedcmd) 2345 return -ENOMEM; 2346 2347 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); 2348 if (ret < 0) { 2349 kfree(savedcmd); 2350 savedcmd = NULL; 2351 return -ENOMEM; 2352 } 2353 2354 return 0; 2355 } 2356 2357 int is_tracing_stopped(void) 2358 { 2359 return global_trace.stop_count; 2360 } 2361 2362 /** 2363 * tracing_start - quick start of the tracer 2364 * 2365 * If tracing is enabled but was stopped by tracing_stop, 2366 * this will start the tracer back up. 
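 *
 * tracing_stop()/tracing_start() calls nest: each stop increments
 * global_trace.stop_count, and only the tracing_start() that brings
 * the count back to zero actually re-enables the ring buffers.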
2367 */ 2368 void tracing_start(void) 2369 { 2370 struct trace_buffer *buffer; 2371 unsigned long flags; 2372 2373 if (tracing_disabled) 2374 return; 2375 2376 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 2377 if (--global_trace.stop_count) { 2378 if (global_trace.stop_count < 0) { 2379 /* Someone screwed up their debugging */ 2380 WARN_ON_ONCE(1); 2381 global_trace.stop_count = 0; 2382 } 2383 goto out; 2384 } 2385 2386 /* Prevent the buffers from switching */ 2387 arch_spin_lock(&global_trace.max_lock); 2388 2389 buffer = global_trace.array_buffer.buffer; 2390 if (buffer) 2391 ring_buffer_record_enable(buffer); 2392 2393 #ifdef CONFIG_TRACER_MAX_TRACE 2394 buffer = global_trace.max_buffer.buffer; 2395 if (buffer) 2396 ring_buffer_record_enable(buffer); 2397 #endif 2398 2399 arch_spin_unlock(&global_trace.max_lock); 2400 2401 out: 2402 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2403 } 2404 2405 static void tracing_start_tr(struct trace_array *tr) 2406 { 2407 struct trace_buffer *buffer; 2408 unsigned long flags; 2409 2410 if (tracing_disabled) 2411 return; 2412 2413 /* If global, we need to also start the max tracer */ 2414 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2415 return tracing_start(); 2416 2417 raw_spin_lock_irqsave(&tr->start_lock, flags); 2418 2419 if (--tr->stop_count) { 2420 if (tr->stop_count < 0) { 2421 /* Someone screwed up their debugging */ 2422 WARN_ON_ONCE(1); 2423 tr->stop_count = 0; 2424 } 2425 goto out; 2426 } 2427 2428 buffer = tr->array_buffer.buffer; 2429 if (buffer) 2430 ring_buffer_record_enable(buffer); 2431 2432 out: 2433 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2434 } 2435 2436 /** 2437 * tracing_stop - quick stop of the tracer 2438 * 2439 * Light weight way to stop tracing. Use in conjunction with 2440 * tracing_start. 2441 */ 2442 void tracing_stop(void) 2443 { 2444 struct trace_buffer *buffer; 2445 unsigned long flags; 2446 2447 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 2448 if (global_trace.stop_count++) 2449 goto out; 2450 2451 /* Prevent the buffers from switching */ 2452 arch_spin_lock(&global_trace.max_lock); 2453 2454 buffer = global_trace.array_buffer.buffer; 2455 if (buffer) 2456 ring_buffer_record_disable(buffer); 2457 2458 #ifdef CONFIG_TRACER_MAX_TRACE 2459 buffer = global_trace.max_buffer.buffer; 2460 if (buffer) 2461 ring_buffer_record_disable(buffer); 2462 #endif 2463 2464 arch_spin_unlock(&global_trace.max_lock); 2465 2466 out: 2467 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 2468 } 2469 2470 static void tracing_stop_tr(struct trace_array *tr) 2471 { 2472 struct trace_buffer *buffer; 2473 unsigned long flags; 2474 2475 /* If global, we need to also stop the max tracer */ 2476 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 2477 return tracing_stop(); 2478 2479 raw_spin_lock_irqsave(&tr->start_lock, flags); 2480 if (tr->stop_count++) 2481 goto out; 2482 2483 buffer = tr->array_buffer.buffer; 2484 if (buffer) 2485 ring_buffer_record_disable(buffer); 2486 2487 out: 2488 raw_spin_unlock_irqrestore(&tr->start_lock, flags); 2489 } 2490 2491 static int trace_save_cmdline(struct task_struct *tsk) 2492 { 2493 unsigned tpid, idx; 2494 2495 /* treat recording of idle task as a success */ 2496 if (!tsk->pid) 2497 return 1; 2498 2499 tpid = tsk->pid & (PID_MAX_DEFAULT - 1); 2500 2501 /* 2502 * It's not the end of the world if we don't get 2503 * the lock, but we also don't want to spin 2504 * nor do we want to disable interrupts, 2505 * so if we miss here, then better luck next time. 
2506 * 2507 * This is called within the scheduler and wake up, so interrupts 2508 * had better been disabled and run queue lock been held. 2509 */ 2510 lockdep_assert_preemption_disabled(); 2511 if (!arch_spin_trylock(&trace_cmdline_lock)) 2512 return 0; 2513 2514 idx = savedcmd->map_pid_to_cmdline[tpid]; 2515 if (idx == NO_CMDLINE_MAP) { 2516 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num; 2517 2518 savedcmd->map_pid_to_cmdline[tpid] = idx; 2519 savedcmd->cmdline_idx = idx; 2520 } 2521 2522 savedcmd->map_cmdline_to_pid[idx] = tsk->pid; 2523 set_cmdline(idx, tsk->comm); 2524 2525 arch_spin_unlock(&trace_cmdline_lock); 2526 2527 return 1; 2528 } 2529 2530 static void __trace_find_cmdline(int pid, char comm[]) 2531 { 2532 unsigned map; 2533 int tpid; 2534 2535 if (!pid) { 2536 strcpy(comm, "<idle>"); 2537 return; 2538 } 2539 2540 if (WARN_ON_ONCE(pid < 0)) { 2541 strcpy(comm, "<XXX>"); 2542 return; 2543 } 2544 2545 tpid = pid & (PID_MAX_DEFAULT - 1); 2546 map = savedcmd->map_pid_to_cmdline[tpid]; 2547 if (map != NO_CMDLINE_MAP) { 2548 tpid = savedcmd->map_cmdline_to_pid[map]; 2549 if (tpid == pid) { 2550 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); 2551 return; 2552 } 2553 } 2554 strcpy(comm, "<...>"); 2555 } 2556 2557 void trace_find_cmdline(int pid, char comm[]) 2558 { 2559 preempt_disable(); 2560 arch_spin_lock(&trace_cmdline_lock); 2561 2562 __trace_find_cmdline(pid, comm); 2563 2564 arch_spin_unlock(&trace_cmdline_lock); 2565 preempt_enable(); 2566 } 2567 2568 static int *trace_find_tgid_ptr(int pid) 2569 { 2570 /* 2571 * Pairs with the smp_store_release in set_tracer_flag() to ensure that 2572 * if we observe a non-NULL tgid_map then we also observe the correct 2573 * tgid_map_max. 2574 */ 2575 int *map = smp_load_acquire(&tgid_map); 2576 2577 if (unlikely(!map || pid > tgid_map_max)) 2578 return NULL; 2579 2580 return &map[pid]; 2581 } 2582 2583 int trace_find_tgid(int pid) 2584 { 2585 int *ptr = trace_find_tgid_ptr(pid); 2586 2587 return ptr ? *ptr : 0; 2588 } 2589 2590 static int trace_save_tgid(struct task_struct *tsk) 2591 { 2592 int *ptr; 2593 2594 /* treat recording of idle task as a success */ 2595 if (!tsk->pid) 2596 return 1; 2597 2598 ptr = trace_find_tgid_ptr(tsk->pid); 2599 if (!ptr) 2600 return 0; 2601 2602 *ptr = tsk->tgid; 2603 return 1; 2604 } 2605 2606 static bool tracing_record_taskinfo_skip(int flags) 2607 { 2608 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID)))) 2609 return true; 2610 if (!__this_cpu_read(trace_taskinfo_save)) 2611 return true; 2612 return false; 2613 } 2614 2615 /** 2616 * tracing_record_taskinfo - record the task info of a task 2617 * 2618 * @task: task to record 2619 * @flags: TRACE_RECORD_CMDLINE for recording comm 2620 * TRACE_RECORD_TGID for recording tgid 2621 */ 2622 void tracing_record_taskinfo(struct task_struct *task, int flags) 2623 { 2624 bool done; 2625 2626 if (tracing_record_taskinfo_skip(flags)) 2627 return; 2628 2629 /* 2630 * Record as much task information as possible. If some fail, continue 2631 * to try to record the others. 2632 */ 2633 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task); 2634 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task); 2635 2636 /* If recording any information failed, retry again soon. 
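 * Leaving trace_taskinfo_save set means the next traced event on this
 * CPU will attempt the recording again; the flag is only cleared once
 * everything that was requested has been saved.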
*/ 2637 if (!done) 2638 return; 2639 2640 __this_cpu_write(trace_taskinfo_save, false); 2641 } 2642 2643 /** 2644 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2645 * 2646 * @prev: previous task during sched_switch 2647 * @next: next task during sched_switch 2648 * @flags: TRACE_RECORD_CMDLINE for recording comm 2649 * TRACE_RECORD_TGID for recording tgid 2650 */ 2651 void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2652 struct task_struct *next, int flags) 2653 { 2654 bool done; 2655 2656 if (tracing_record_taskinfo_skip(flags)) 2657 return; 2658 2659 /* 2660 * Record as much task information as possible. If some fail, continue 2661 * to try to record the others. 2662 */ 2663 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev); 2664 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next); 2665 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev); 2666 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next); 2667 2668 /* If recording any information failed, retry again soon. */ 2669 if (!done) 2670 return; 2671 2672 __this_cpu_write(trace_taskinfo_save, false); 2673 } 2674 2675 /* Helpers to record a specific task information */ 2676 void tracing_record_cmdline(struct task_struct *task) 2677 { 2678 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE); 2679 } 2680 2681 void tracing_record_tgid(struct task_struct *task) 2682 { 2683 tracing_record_taskinfo(task, TRACE_RECORD_TGID); 2684 } 2685 2686 /* 2687 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq 2688 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function 2689 * simplifies those functions and keeps them in sync. 2690 */ 2691 enum print_line_t trace_handle_return(struct trace_seq *s) 2692 { 2693 return trace_seq_has_overflowed(s) ? 
2694 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; 2695 } 2696 EXPORT_SYMBOL_GPL(trace_handle_return); 2697 2698 static unsigned short migration_disable_value(void) 2699 { 2700 #if defined(CONFIG_SMP) 2701 return current->migration_disabled; 2702 #else 2703 return 0; 2704 #endif 2705 } 2706 2707 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) 2708 { 2709 unsigned int trace_flags = irqs_status; 2710 unsigned int pc; 2711 2712 pc = preempt_count(); 2713 2714 if (pc & NMI_MASK) 2715 trace_flags |= TRACE_FLAG_NMI; 2716 if (pc & HARDIRQ_MASK) 2717 trace_flags |= TRACE_FLAG_HARDIRQ; 2718 if (in_serving_softirq()) 2719 trace_flags |= TRACE_FLAG_SOFTIRQ; 2720 if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) 2721 trace_flags |= TRACE_FLAG_BH_OFF; 2722 2723 if (tif_need_resched()) 2724 trace_flags |= TRACE_FLAG_NEED_RESCHED; 2725 if (test_preempt_need_resched()) 2726 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; 2727 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | 2728 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; 2729 } 2730 2731 struct ring_buffer_event * 2732 trace_buffer_lock_reserve(struct trace_buffer *buffer, 2733 int type, 2734 unsigned long len, 2735 unsigned int trace_ctx) 2736 { 2737 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx); 2738 } 2739 2740 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); 2741 DEFINE_PER_CPU(int, trace_buffered_event_cnt); 2742 static int trace_buffered_event_ref; 2743 2744 /** 2745 * trace_buffered_event_enable - enable buffering events 2746 * 2747 * When events are being filtered, it is quicker to use a temporary 2748 * buffer to write the event data into if there's a likely chance 2749 * that it will not be committed. The discard of the ring buffer 2750 * is not as fast as committing, and is much slower than copying 2751 * a commit. 2752 * 2753 * When an event is to be filtered, allocate per cpu buffers to 2754 * write the event data into, and if the event is filtered and discarded 2755 * it is simply dropped, otherwise, the entire data is to be committed 2756 * in one shot. 2757 */ 2758 void trace_buffered_event_enable(void) 2759 { 2760 struct ring_buffer_event *event; 2761 struct page *page; 2762 int cpu; 2763 2764 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2765 2766 if (trace_buffered_event_ref++) 2767 return; 2768 2769 for_each_tracing_cpu(cpu) { 2770 page = alloc_pages_node(cpu_to_node(cpu), 2771 GFP_KERNEL | __GFP_NORETRY, 0); 2772 if (!page) 2773 goto failed; 2774 2775 event = page_address(page); 2776 memset(event, 0, sizeof(*event)); 2777 2778 per_cpu(trace_buffered_event, cpu) = event; 2779 2780 preempt_disable(); 2781 if (cpu == smp_processor_id() && 2782 __this_cpu_read(trace_buffered_event) != 2783 per_cpu(trace_buffered_event, cpu)) 2784 WARN_ON_ONCE(1); 2785 preempt_enable(); 2786 } 2787 2788 return; 2789 failed: 2790 trace_buffered_event_disable(); 2791 } 2792 2793 static void enable_trace_buffered_event(void *data) 2794 { 2795 /* Probably not needed, but do it anyway */ 2796 smp_rmb(); 2797 this_cpu_dec(trace_buffered_event_cnt); 2798 } 2799 2800 static void disable_trace_buffered_event(void *data) 2801 { 2802 this_cpu_inc(trace_buffered_event_cnt); 2803 } 2804 2805 /** 2806 * trace_buffered_event_disable - disable buffering events 2807 * 2808 * When a filter is removed, it is faster to not use the buffered 2809 * events, and to commit directly into the ring buffer. Free up 2810 * the temp buffers when there are no more users. 
This requires 2811 * special synchronization with current events. 2812 */ 2813 void trace_buffered_event_disable(void) 2814 { 2815 int cpu; 2816 2817 WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); 2818 2819 if (WARN_ON_ONCE(!trace_buffered_event_ref)) 2820 return; 2821 2822 if (--trace_buffered_event_ref) 2823 return; 2824 2825 preempt_disable(); 2826 /* For each CPU, set the buffer as used. */ 2827 smp_call_function_many(tracing_buffer_mask, 2828 disable_trace_buffered_event, NULL, 1); 2829 preempt_enable(); 2830 2831 /* Wait for all current users to finish */ 2832 synchronize_rcu(); 2833 2834 for_each_tracing_cpu(cpu) { 2835 free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); 2836 per_cpu(trace_buffered_event, cpu) = NULL; 2837 } 2838 /* 2839 * Make sure trace_buffered_event is NULL before clearing 2840 * trace_buffered_event_cnt. 2841 */ 2842 smp_wmb(); 2843 2844 preempt_disable(); 2845 /* Do the work on each cpu */ 2846 smp_call_function_many(tracing_buffer_mask, 2847 enable_trace_buffered_event, NULL, 1); 2848 preempt_enable(); 2849 } 2850 2851 static struct trace_buffer *temp_buffer; 2852 2853 struct ring_buffer_event * 2854 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, 2855 struct trace_event_file *trace_file, 2856 int type, unsigned long len, 2857 unsigned int trace_ctx) 2858 { 2859 struct ring_buffer_event *entry; 2860 struct trace_array *tr = trace_file->tr; 2861 int val; 2862 2863 *current_rb = tr->array_buffer.buffer; 2864 2865 if (!tr->no_filter_buffering_ref && 2866 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { 2867 preempt_disable_notrace(); 2868 /* 2869 * Filtering is on, so try to use the per cpu buffer first. 2870 * This buffer will simulate a ring_buffer_event, 2871 * where the type_len is zero and the array[0] will 2872 * hold the full length. 2873 * (see include/linux/ring-buffer.h for details on 2874 * how the ring_buffer_event is structured). 2875 * 2876 * Using a temp buffer during filtering and copying it 2877 * on a matched filter is quicker than writing directly 2878 * into the ring buffer and then discarding it when 2879 * it doesn't match. That is because the discard 2880 * requires several atomic operations to get right. 2881 * Copying on match and doing nothing on a failed match 2882 * is still quicker than no copy on match, but having 2883 * to discard out of the ring buffer on a failed match. 2884 */ 2885 if ((entry = __this_cpu_read(trace_buffered_event))) { 2886 int max_len = PAGE_SIZE - struct_size(entry, array, 1); 2887 2888 val = this_cpu_inc_return(trace_buffered_event_cnt); 2889 2890 /* 2891 * Preemption is disabled, but interrupts and NMIs 2892 * can still come in now. If that happens after 2893 * the above increment, then it will have to go 2894 * back to the old method of allocating the event 2895 * on the ring buffer, and if the filter fails, it 2896 * will have to call ring_buffer_discard_commit() 2897 * to remove it. 2898 * 2899 * Need to also check the unlikely case that the 2900 * length is bigger than the temp buffer size. 2901 * If that happens, then the reserve is pretty much 2902 * guaranteed to fail, as the ring buffer currently 2903 * only allows events less than a page. But that may 2904 * change in the future, so let the ring buffer reserve 2905 * handle the failure in that case. 
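 *
 * (val == 1 below means this context is the sole current user of the
 * per-CPU scratch event; a nested interrupt that raced in after the
 * increment will see val > 1 and fall back to a normal ring buffer
 * reservation instead.)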
2906 */ 2907 if (val == 1 && likely(len <= max_len)) { 2908 trace_event_setup(entry, type, trace_ctx); 2909 entry->array[0] = len; 2910 /* Return with preemption disabled */ 2911 return entry; 2912 } 2913 this_cpu_dec(trace_buffered_event_cnt); 2914 } 2915 /* __trace_buffer_lock_reserve() disables preemption */ 2916 preempt_enable_notrace(); 2917 } 2918 2919 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2920 trace_ctx); 2921 /* 2922 * If tracing is off, but we have triggers enabled 2923 * we still need to look at the event data. Use the temp_buffer 2924 * to store the trace event for the trigger to use. It's recursive 2925 * safe and will not be recorded anywhere. 2926 */ 2927 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 2928 *current_rb = temp_buffer; 2929 entry = __trace_buffer_lock_reserve(*current_rb, type, len, 2930 trace_ctx); 2931 } 2932 return entry; 2933 } 2934 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 2935 2936 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); 2937 static DEFINE_MUTEX(tracepoint_printk_mutex); 2938 2939 static void output_printk(struct trace_event_buffer *fbuffer) 2940 { 2941 struct trace_event_call *event_call; 2942 struct trace_event_file *file; 2943 struct trace_event *event; 2944 unsigned long flags; 2945 struct trace_iterator *iter = tracepoint_print_iter; 2946 2947 /* We should never get here if iter is NULL */ 2948 if (WARN_ON_ONCE(!iter)) 2949 return; 2950 2951 event_call = fbuffer->trace_file->event_call; 2952 if (!event_call || !event_call->event.funcs || 2953 !event_call->event.funcs->trace) 2954 return; 2955 2956 file = fbuffer->trace_file; 2957 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || 2958 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 2959 !filter_match_preds(file->filter, fbuffer->entry))) 2960 return; 2961 2962 event = &fbuffer->trace_file->event_call->event; 2963 2964 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); 2965 trace_seq_init(&iter->seq); 2966 iter->ent = fbuffer->entry; 2967 event_call->event.funcs->trace(iter, 0, event); 2968 trace_seq_putc(&iter->seq, 0); 2969 printk("%s", iter->seq.buffer); 2970 2971 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 2972 } 2973 2974 int tracepoint_printk_sysctl(struct ctl_table *table, int write, 2975 void *buffer, size_t *lenp, 2976 loff_t *ppos) 2977 { 2978 int save_tracepoint_printk; 2979 int ret; 2980 2981 mutex_lock(&tracepoint_printk_mutex); 2982 save_tracepoint_printk = tracepoint_printk; 2983 2984 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2985 2986 /* 2987 * This will force exiting early, as tracepoint_printk 2988 * is always zero when tracepoint_printk_iter is not allocated 2989 */ 2990 if (!tracepoint_print_iter) 2991 tracepoint_printk = 0; 2992 2993 if (save_tracepoint_printk == tracepoint_printk) 2994 goto out; 2995 2996 if (tracepoint_printk) 2997 static_key_enable(&tracepoint_printk_key.key); 2998 else 2999 static_key_disable(&tracepoint_printk_key.key); 3000 3001 out: 3002 mutex_unlock(&tracepoint_printk_mutex); 3003 3004 return ret; 3005 } 3006 3007 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 3008 { 3009 enum event_trigger_type tt = ETT_NONE; 3010 struct trace_event_file *file = fbuffer->trace_file; 3011 3012 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, 3013 fbuffer->entry, &tt)) 3014 goto discard; 3015 3016 if (static_key_false(&tracepoint_printk_key.key)) 3017 output_printk(fbuffer); 3018 3019 if 
(static_branch_unlikely(&trace_event_exports_enabled)) 3020 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); 3021 3022 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, 3023 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); 3024 3025 discard: 3026 if (tt) 3027 event_triggers_post_call(file, tt); 3028 3029 } 3030 EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 3031 3032 /* 3033 * Skip 3: 3034 * 3035 * trace_buffer_unlock_commit_regs() 3036 * trace_event_buffer_commit() 3037 * trace_event_raw_event_xxx() 3038 */ 3039 # define STACK_SKIP 3 3040 3041 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 3042 struct trace_buffer *buffer, 3043 struct ring_buffer_event *event, 3044 unsigned int trace_ctx, 3045 struct pt_regs *regs) 3046 { 3047 __buffer_unlock_commit(buffer, event); 3048 3049 /* 3050 * If regs is not set, then skip the necessary functions. 3051 * Note, we can still get here via blktrace, wakeup tracer 3052 * and mmiotrace, but that's ok if they lose a function or 3053 * two. They are not that meaningful. 3054 */ 3055 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); 3056 ftrace_trace_userstack(tr, buffer, trace_ctx); 3057 } 3058 3059 /* 3060 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. 3061 */ 3062 void 3063 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 3064 struct ring_buffer_event *event) 3065 { 3066 __buffer_unlock_commit(buffer, event); 3067 } 3068 3069 void 3070 trace_function(struct trace_array *tr, unsigned long ip, unsigned long 3071 parent_ip, unsigned int trace_ctx) 3072 { 3073 struct trace_event_call *call = &event_function; 3074 struct trace_buffer *buffer = tr->array_buffer.buffer; 3075 struct ring_buffer_event *event; 3076 struct ftrace_entry *entry; 3077 3078 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), 3079 trace_ctx); 3080 if (!event) 3081 return; 3082 entry = ring_buffer_event_data(event); 3083 entry->ip = ip; 3084 entry->parent_ip = parent_ip; 3085 3086 if (!call_filter_check_discard(call, entry, buffer, event)) { 3087 if (static_branch_unlikely(&trace_function_exports_enabled)) 3088 ftrace_exports(event, TRACE_EXPORT_FUNCTION); 3089 __buffer_unlock_commit(buffer, event); 3090 } 3091 } 3092 3093 #ifdef CONFIG_STACKTRACE 3094 3095 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ 3096 #define FTRACE_KSTACK_NESTING 4 3097 3098 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING) 3099 3100 struct ftrace_stack { 3101 unsigned long calls[FTRACE_KSTACK_ENTRIES]; 3102 }; 3103 3104 3105 struct ftrace_stacks { 3106 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; 3107 }; 3108 3109 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); 3110 static DEFINE_PER_CPU(int, ftrace_stack_reserve); 3111 3112 static void __ftrace_trace_stack(struct trace_buffer *buffer, 3113 unsigned int trace_ctx, 3114 int skip, struct pt_regs *regs) 3115 { 3116 struct trace_event_call *call = &event_kernel_stack; 3117 struct ring_buffer_event *event; 3118 unsigned int size, nr_entries; 3119 struct ftrace_stack *fstack; 3120 struct stack_entry *entry; 3121 int stackidx; 3122 3123 /* 3124 * Add one, for this function and the call to save_stack_trace() 3125 * If regs is set, then these functions will not be in the way. 3126 */ 3127 #ifndef CONFIG_UNWINDER_ORC 3128 if (!regs) 3129 skip++; 3130 #endif 3131 3132 preempt_disable_notrace(); 3133 3134 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; 3135 3136 /* This should never happen. 
If it does, yell once and skip */ 3137 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) 3138 goto out; 3139 3140 /* 3141 * The above __this_cpu_inc_return() is 'atomic' cpu local. An 3142 * interrupt will either see the value pre increment or post 3143 * increment. If the interrupt happens pre increment it will have 3144 * restored the counter when it returns. We just need a barrier to 3145 * keep gcc from moving things around. 3146 */ 3147 barrier(); 3148 3149 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; 3150 size = ARRAY_SIZE(fstack->calls); 3151 3152 if (regs) { 3153 nr_entries = stack_trace_save_regs(regs, fstack->calls, 3154 size, skip); 3155 } else { 3156 nr_entries = stack_trace_save(fstack->calls, size, skip); 3157 } 3158 3159 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, 3160 struct_size(entry, caller, nr_entries), 3161 trace_ctx); 3162 if (!event) 3163 goto out; 3164 entry = ring_buffer_event_data(event); 3165 3166 entry->size = nr_entries; 3167 memcpy(&entry->caller, fstack->calls, 3168 flex_array_size(entry, caller, nr_entries)); 3169 3170 if (!call_filter_check_discard(call, entry, buffer, event)) 3171 __buffer_unlock_commit(buffer, event); 3172 3173 out: 3174 /* Again, don't let gcc optimize things here */ 3175 barrier(); 3176 __this_cpu_dec(ftrace_stack_reserve); 3177 preempt_enable_notrace(); 3178 3179 } 3180 3181 static inline void ftrace_trace_stack(struct trace_array *tr, 3182 struct trace_buffer *buffer, 3183 unsigned int trace_ctx, 3184 int skip, struct pt_regs *regs) 3185 { 3186 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) 3187 return; 3188 3189 __ftrace_trace_stack(buffer, trace_ctx, skip, regs); 3190 } 3191 3192 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, 3193 int skip) 3194 { 3195 struct trace_buffer *buffer = tr->array_buffer.buffer; 3196 3197 if (rcu_is_watching()) { 3198 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL); 3199 return; 3200 } 3201 3202 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY))) 3203 return; 3204 3205 /* 3206 * When an NMI triggers, RCU is enabled via ct_nmi_enter(), 3207 * but if the above rcu_is_watching() failed, then the NMI 3208 * triggered someplace critical, and ct_irq_enter() should 3209 * not be called from NMI. 3210 */ 3211 if (unlikely(in_nmi())) 3212 return; 3213 3214 ct_irq_enter_irqson(); 3215 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL); 3216 ct_irq_exit_irqson(); 3217 } 3218 3219 /** 3220 * trace_dump_stack - record a stack back trace in the trace buffer 3221 * @skip: Number of functions to skip (helper handlers) 3222 */ 3223 void trace_dump_stack(int skip) 3224 { 3225 if (tracing_disabled || tracing_selftest_running) 3226 return; 3227 3228 #ifndef CONFIG_UNWINDER_ORC 3229 /* Skip 1 to skip this function. */ 3230 skip++; 3231 #endif 3232 __ftrace_trace_stack(global_trace.array_buffer.buffer, 3233 tracing_gen_ctx(), skip, NULL); 3234 } 3235 EXPORT_SYMBOL_GPL(trace_dump_stack); 3236 3237 #ifdef CONFIG_USER_STACKTRACE_SUPPORT 3238 static DEFINE_PER_CPU(int, user_stack_count); 3239 3240 static void 3241 ftrace_trace_userstack(struct trace_array *tr, 3242 struct trace_buffer *buffer, unsigned int trace_ctx) 3243 { 3244 struct trace_event_call *call = &event_user_stack; 3245 struct ring_buffer_event *event; 3246 struct userstack_entry *entry; 3247 3248 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) 3249 return; 3250 3251 /* 3252 * NMIs can not handle page faults, even with fix ups. 3253 * The save user stack can (and often does) fault. 
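 *
 * (stack_trace_save_user() has to read the user stack, which may need
 * to fault pages in; that is fine from process context but not from
 * NMI context.)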
3254 */ 3255 if (unlikely(in_nmi())) 3256 return; 3257 3258 /* 3259 * prevent recursion, since the user stack tracing may 3260 * trigger other kernel events. 3261 */ 3262 preempt_disable(); 3263 if (__this_cpu_read(user_stack_count)) 3264 goto out; 3265 3266 __this_cpu_inc(user_stack_count); 3267 3268 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 3269 sizeof(*entry), trace_ctx); 3270 if (!event) 3271 goto out_drop_count; 3272 entry = ring_buffer_event_data(event); 3273 3274 entry->tgid = current->tgid; 3275 memset(&entry->caller, 0, sizeof(entry->caller)); 3276 3277 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); 3278 if (!call_filter_check_discard(call, entry, buffer, event)) 3279 __buffer_unlock_commit(buffer, event); 3280 3281 out_drop_count: 3282 __this_cpu_dec(user_stack_count); 3283 out: 3284 preempt_enable(); 3285 } 3286 #else /* CONFIG_USER_STACKTRACE_SUPPORT */ 3287 static void ftrace_trace_userstack(struct trace_array *tr, 3288 struct trace_buffer *buffer, 3289 unsigned int trace_ctx) 3290 { 3291 } 3292 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ 3293 3294 #endif /* CONFIG_STACKTRACE */ 3295 3296 static inline void 3297 func_repeats_set_delta_ts(struct func_repeats_entry *entry, 3298 unsigned long long delta) 3299 { 3300 entry->bottom_delta_ts = delta & U32_MAX; 3301 entry->top_delta_ts = (delta >> 32); 3302 } 3303 3304 void trace_last_func_repeats(struct trace_array *tr, 3305 struct trace_func_repeats *last_info, 3306 unsigned int trace_ctx) 3307 { 3308 struct trace_buffer *buffer = tr->array_buffer.buffer; 3309 struct func_repeats_entry *entry; 3310 struct ring_buffer_event *event; 3311 u64 delta; 3312 3313 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, 3314 sizeof(*entry), trace_ctx); 3315 if (!event) 3316 return; 3317 3318 delta = ring_buffer_event_time_stamp(buffer, event) - 3319 last_info->ts_last_call; 3320 3321 entry = ring_buffer_event_data(event); 3322 entry->ip = last_info->ip; 3323 entry->parent_ip = last_info->parent_ip; 3324 entry->count = last_info->count; 3325 func_repeats_set_delta_ts(entry, delta); 3326 3327 __buffer_unlock_commit(buffer, event); 3328 } 3329 3330 /* created for use with alloc_percpu */ 3331 struct trace_buffer_struct { 3332 int nesting; 3333 char buffer[4][TRACE_BUF_SIZE]; 3334 }; 3335 3336 static struct trace_buffer_struct __percpu *trace_percpu_buffer; 3337 3338 /* 3339 * This allows for lockless recording. If we're nested too deeply, then 3340 * this returns NULL. 
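 *
 * The four per-CPU buffers line up with the possible nesting of
 * contexts (normal, softirq, irq, NMI), so a trace_printk() from an
 * interrupt does not overwrite a buffer still being formatted in the
 * context it interrupted.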
3341 */ 3342 static char *get_trace_buf(void) 3343 { 3344 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); 3345 3346 if (!trace_percpu_buffer || buffer->nesting >= 4) 3347 return NULL; 3348 3349 buffer->nesting++; 3350 3351 /* Interrupts must see nesting incremented before we use the buffer */ 3352 barrier(); 3353 return &buffer->buffer[buffer->nesting - 1][0]; 3354 } 3355 3356 static void put_trace_buf(void) 3357 { 3358 /* Don't let the decrement of nesting leak before this */ 3359 barrier(); 3360 this_cpu_dec(trace_percpu_buffer->nesting); 3361 } 3362 3363 static int alloc_percpu_trace_buffer(void) 3364 { 3365 struct trace_buffer_struct __percpu *buffers; 3366 3367 if (trace_percpu_buffer) 3368 return 0; 3369 3370 buffers = alloc_percpu(struct trace_buffer_struct); 3371 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) 3372 return -ENOMEM; 3373 3374 trace_percpu_buffer = buffers; 3375 return 0; 3376 } 3377 3378 static int buffers_allocated; 3379 3380 void trace_printk_init_buffers(void) 3381 { 3382 if (buffers_allocated) 3383 return; 3384 3385 if (alloc_percpu_trace_buffer()) 3386 return; 3387 3388 /* trace_printk() is for debug use only. Don't use it in production. */ 3389 3390 pr_warn("\n"); 3391 pr_warn("**********************************************************\n"); 3392 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3393 pr_warn("** **\n"); 3394 pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); 3395 pr_warn("** **\n"); 3396 pr_warn("** This means that this is a DEBUG kernel and it is **\n"); 3397 pr_warn("** unsafe for production use. **\n"); 3398 pr_warn("** **\n"); 3399 pr_warn("** If you see this message and you are not debugging **\n"); 3400 pr_warn("** the kernel, report this immediately to your vendor! **\n"); 3401 pr_warn("** **\n"); 3402 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); 3403 pr_warn("**********************************************************\n"); 3404 3405 /* Expand the buffers to set size */ 3406 tracing_update_buffers(); 3407 3408 buffers_allocated = 1; 3409 3410 /* 3411 * trace_printk_init_buffers() can be called by modules. 3412 * If that happens, then we need to start cmdline recording 3413 * directly here. If the global_trace.buffer is already 3414 * allocated here, then this was called by module code. 
3415 */ 3416 if (global_trace.array_buffer.buffer) 3417 tracing_start_cmdline_record(); 3418 } 3419 EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3420 3421 void trace_printk_start_comm(void) 3422 { 3423 /* Start tracing comms if trace printk is set */ 3424 if (!buffers_allocated) 3425 return; 3426 tracing_start_cmdline_record(); 3427 } 3428 3429 static void trace_printk_start_stop_comm(int enabled) 3430 { 3431 if (!buffers_allocated) 3432 return; 3433 3434 if (enabled) 3435 tracing_start_cmdline_record(); 3436 else 3437 tracing_stop_cmdline_record(); 3438 } 3439 3440 /** 3441 * trace_vbprintk - write binary msg to tracing buffer 3442 * @ip: The address of the caller 3443 * @fmt: The string format to write to the buffer 3444 * @args: Arguments for @fmt 3445 */ 3446 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3447 { 3448 struct trace_event_call *call = &event_bprint; 3449 struct ring_buffer_event *event; 3450 struct trace_buffer *buffer; 3451 struct trace_array *tr = &global_trace; 3452 struct bprint_entry *entry; 3453 unsigned int trace_ctx; 3454 char *tbuffer; 3455 int len = 0, size; 3456 3457 if (unlikely(tracing_selftest_running || tracing_disabled)) 3458 return 0; 3459 3460 /* Don't pollute graph traces with trace_vprintk internals */ 3461 pause_graph_tracing(); 3462 3463 trace_ctx = tracing_gen_ctx(); 3464 preempt_disable_notrace(); 3465 3466 tbuffer = get_trace_buf(); 3467 if (!tbuffer) { 3468 len = 0; 3469 goto out_nobuffer; 3470 } 3471 3472 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); 3473 3474 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) 3475 goto out_put; 3476 3477 size = sizeof(*entry) + sizeof(u32) * len; 3478 buffer = tr->array_buffer.buffer; 3479 ring_buffer_nest_start(buffer); 3480 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, 3481 trace_ctx); 3482 if (!event) 3483 goto out; 3484 entry = ring_buffer_event_data(event); 3485 entry->ip = ip; 3486 entry->fmt = fmt; 3487 3488 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 3489 if (!call_filter_check_discard(call, entry, buffer, event)) { 3490 __buffer_unlock_commit(buffer, event); 3491 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); 3492 } 3493 3494 out: 3495 ring_buffer_nest_end(buffer); 3496 out_put: 3497 put_trace_buf(); 3498 3499 out_nobuffer: 3500 preempt_enable_notrace(); 3501 unpause_graph_tracing(); 3502 3503 return len; 3504 } 3505 EXPORT_SYMBOL_GPL(trace_vbprintk); 3506 3507 __printf(3, 0) 3508 static int 3509 __trace_array_vprintk(struct trace_buffer *buffer, 3510 unsigned long ip, const char *fmt, va_list args) 3511 { 3512 struct trace_event_call *call = &event_print; 3513 struct ring_buffer_event *event; 3514 int len = 0, size; 3515 struct print_entry *entry; 3516 unsigned int trace_ctx; 3517 char *tbuffer; 3518 3519 if (tracing_disabled) 3520 return 0; 3521 3522 /* Don't pollute graph traces with trace_vprintk internals */ 3523 pause_graph_tracing(); 3524 3525 trace_ctx = tracing_gen_ctx(); 3526 preempt_disable_notrace(); 3527 3528 3529 tbuffer = get_trace_buf(); 3530 if (!tbuffer) { 3531 len = 0; 3532 goto out_nobuffer; 3533 } 3534 3535 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); 3536 3537 size = sizeof(*entry) + len + 1; 3538 ring_buffer_nest_start(buffer); 3539 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 3540 trace_ctx); 3541 if (!event) 3542 goto out; 3543 entry = ring_buffer_event_data(event); 3544 entry->ip = ip; 3545 3546 memcpy(&entry->buf, tbuffer, len + 1); 3547 if (!call_filter_check_discard(call, 
entry, buffer, event)) { 3548 __buffer_unlock_commit(buffer, event); 3549 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL); 3550 } 3551 3552 out: 3553 ring_buffer_nest_end(buffer); 3554 put_trace_buf(); 3555 3556 out_nobuffer: 3557 preempt_enable_notrace(); 3558 unpause_graph_tracing(); 3559 3560 return len; 3561 } 3562 3563 __printf(3, 0) 3564 int trace_array_vprintk(struct trace_array *tr, 3565 unsigned long ip, const char *fmt, va_list args) 3566 { 3567 if (tracing_selftest_running && tr == &global_trace) 3568 return 0; 3569 3570 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); 3571 } 3572 3573 /** 3574 * trace_array_printk - Print a message to a specific instance 3575 * @tr: The instance trace_array descriptor 3576 * @ip: The instruction pointer that this is called from. 3577 * @fmt: The format to print (printf format) 3578 * 3579 * If a subsystem sets up its own instance, they have the right to 3580 * printk strings into their tracing instance buffer using this 3581 * function. Note, this function will not write into the top level 3582 * buffer (use trace_printk() for that), as writing into the top level 3583 * buffer should only have events that can be individually disabled. 3584 * trace_printk() is only used for debugging a kernel, and should not 3585 * be ever incorporated in normal use. 3586 * 3587 * trace_array_printk() can be used, as it will not add noise to the 3588 * top level tracing buffer. 3589 * 3590 * Note, trace_array_init_printk() must be called on @tr before this 3591 * can be used. 3592 */ 3593 __printf(3, 0) 3594 int trace_array_printk(struct trace_array *tr, 3595 unsigned long ip, const char *fmt, ...) 3596 { 3597 int ret; 3598 va_list ap; 3599 3600 if (!tr) 3601 return -ENOENT; 3602 3603 /* This is only allowed for created instances */ 3604 if (tr == &global_trace) 3605 return 0; 3606 3607 if (!(tr->trace_flags & TRACE_ITER_PRINTK)) 3608 return 0; 3609 3610 va_start(ap, fmt); 3611 ret = trace_array_vprintk(tr, ip, fmt, ap); 3612 va_end(ap); 3613 return ret; 3614 } 3615 EXPORT_SYMBOL_GPL(trace_array_printk); 3616 3617 /** 3618 * trace_array_init_printk - Initialize buffers for trace_array_printk() 3619 * @tr: The trace array to initialize the buffers for 3620 * 3621 * As trace_array_printk() only writes into instances, they are OK to 3622 * have in the kernel (unlike trace_printk()). This needs to be called 3623 * before trace_array_printk() can be used on a trace_array. 3624 */ 3625 int trace_array_init_printk(struct trace_array *tr) 3626 { 3627 if (!tr) 3628 return -ENOENT; 3629 3630 /* This is only allowed for created instances */ 3631 if (tr == &global_trace) 3632 return -EINVAL; 3633 3634 return alloc_percpu_trace_buffer(); 3635 } 3636 EXPORT_SYMBOL_GPL(trace_array_init_printk); 3637 3638 __printf(3, 4) 3639 int trace_array_printk_buf(struct trace_buffer *buffer, 3640 unsigned long ip, const char *fmt, ...) 
3641 { 3642 int ret; 3643 va_list ap; 3644 3645 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 3646 return 0; 3647 3648 va_start(ap, fmt); 3649 ret = __trace_array_vprintk(buffer, ip, fmt, ap); 3650 va_end(ap); 3651 return ret; 3652 } 3653 3654 __printf(2, 0) 3655 int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 3656 { 3657 return trace_array_vprintk(&global_trace, ip, fmt, args); 3658 } 3659 EXPORT_SYMBOL_GPL(trace_vprintk); 3660 3661 static void trace_iterator_increment(struct trace_iterator *iter) 3662 { 3663 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); 3664 3665 iter->idx++; 3666 if (buf_iter) 3667 ring_buffer_iter_advance(buf_iter); 3668 } 3669 3670 static struct trace_entry * 3671 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, 3672 unsigned long *lost_events) 3673 { 3674 struct ring_buffer_event *event; 3675 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); 3676 3677 if (buf_iter) { 3678 event = ring_buffer_iter_peek(buf_iter, ts); 3679 if (lost_events) 3680 *lost_events = ring_buffer_iter_dropped(buf_iter) ? 3681 (unsigned long)-1 : 0; 3682 } else { 3683 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, 3684 lost_events); 3685 } 3686 3687 if (event) { 3688 iter->ent_size = ring_buffer_event_length(event); 3689 return ring_buffer_event_data(event); 3690 } 3691 iter->ent_size = 0; 3692 return NULL; 3693 } 3694 3695 static struct trace_entry * 3696 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, 3697 unsigned long *missing_events, u64 *ent_ts) 3698 { 3699 struct trace_buffer *buffer = iter->array_buffer->buffer; 3700 struct trace_entry *ent, *next = NULL; 3701 unsigned long lost_events = 0, next_lost = 0; 3702 int cpu_file = iter->cpu_file; 3703 u64 next_ts = 0, ts; 3704 int next_cpu = -1; 3705 int next_size = 0; 3706 int cpu; 3707 3708 /* 3709 * If we are in a per_cpu trace file, don't bother by iterating over 3710 * all cpu and peek directly. 3711 */ 3712 if (cpu_file > RING_BUFFER_ALL_CPUS) { 3713 if (ring_buffer_empty_cpu(buffer, cpu_file)) 3714 return NULL; 3715 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); 3716 if (ent_cpu) 3717 *ent_cpu = cpu_file; 3718 3719 return ent; 3720 } 3721 3722 for_each_tracing_cpu(cpu) { 3723 3724 if (ring_buffer_empty_cpu(buffer, cpu)) 3725 continue; 3726 3727 ent = peek_next_entry(iter, cpu, &ts, &lost_events); 3728 3729 /* 3730 * Pick the entry with the smallest timestamp: 3731 */ 3732 if (ent && (!next || ts < next_ts)) { 3733 next = ent; 3734 next_cpu = cpu; 3735 next_ts = ts; 3736 next_lost = lost_events; 3737 next_size = iter->ent_size; 3738 } 3739 } 3740 3741 iter->ent_size = next_size; 3742 3743 if (ent_cpu) 3744 *ent_cpu = next_cpu; 3745 3746 if (ent_ts) 3747 *ent_ts = next_ts; 3748 3749 if (missing_events) 3750 *missing_events = next_lost; 3751 3752 return next; 3753 } 3754 3755 #define STATIC_FMT_BUF_SIZE 128 3756 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; 3757 3758 char *trace_iter_expand_format(struct trace_iterator *iter) 3759 { 3760 char *tmp; 3761 3762 /* 3763 * iter->tr is NULL when used with tp_printk, which makes 3764 * this get called where it is not safe to call krealloc(). 
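 *
 * Returns the (possibly reallocated) iter->fmt buffer grown by
 * STATIC_FMT_BUF_SIZE bytes, or NULL if the buffer cannot be grown;
 * callers then fall back to using the format as-is.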
3765 */ 3766 if (!iter->tr || iter->fmt == static_fmt_buf) 3767 return NULL; 3768 3769 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE, 3770 GFP_KERNEL); 3771 if (tmp) { 3772 iter->fmt_size += STATIC_FMT_BUF_SIZE; 3773 iter->fmt = tmp; 3774 } 3775 3776 return tmp; 3777 } 3778 3779 /* Returns true if the string is safe to dereference from an event */ 3780 static bool trace_safe_str(struct trace_iterator *iter, const char *str, 3781 bool star, int len) 3782 { 3783 unsigned long addr = (unsigned long)str; 3784 struct trace_event *trace_event; 3785 struct trace_event_call *event; 3786 3787 /* Ignore strings with no length */ 3788 if (star && !len) 3789 return true; 3790 3791 /* OK if part of the event data */ 3792 if ((addr >= (unsigned long)iter->ent) && 3793 (addr < (unsigned long)iter->ent + iter->ent_size)) 3794 return true; 3795 3796 /* OK if part of the temp seq buffer */ 3797 if ((addr >= (unsigned long)iter->tmp_seq.buffer) && 3798 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE)) 3799 return true; 3800 3801 /* Core rodata can not be freed */ 3802 if (is_kernel_rodata(addr)) 3803 return true; 3804 3805 if (trace_is_tracepoint_string(str)) 3806 return true; 3807 3808 /* 3809 * Now this could be a module event, referencing core module 3810 * data, which is OK. 3811 */ 3812 if (!iter->ent) 3813 return false; 3814 3815 trace_event = ftrace_find_event(iter->ent->type); 3816 if (!trace_event) 3817 return false; 3818 3819 event = container_of(trace_event, struct trace_event_call, event); 3820 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) 3821 return false; 3822 3823 /* Would rather have rodata, but this will suffice */ 3824 if (within_module_core(addr, event->module)) 3825 return true; 3826 3827 return false; 3828 } 3829 3830 static const char *show_buffer(struct trace_seq *s) 3831 { 3832 struct seq_buf *seq = &s->seq; 3833 3834 seq_buf_terminate(seq); 3835 3836 return seq->buffer; 3837 } 3838 3839 static DEFINE_STATIC_KEY_FALSE(trace_no_verify); 3840 3841 static int test_can_verify_check(const char *fmt, ...) 3842 { 3843 char buf[16]; 3844 va_list ap; 3845 int ret; 3846 3847 /* 3848 * The verifier is dependent on vsnprintf() modifies the va_list 3849 * passed to it, where it is sent as a reference. Some architectures 3850 * (like x86_32) passes it by value, which means that vsnprintf() 3851 * does not modify the va_list passed to it, and the verifier 3852 * would then need to be able to understand all the values that 3853 * vsnprintf can use. If it is passed by value, then the verifier 3854 * is disabled. 3855 */ 3856 va_start(ap, fmt); 3857 vsnprintf(buf, 16, "%d", ap); 3858 ret = va_arg(ap, int); 3859 va_end(ap); 3860 3861 return ret; 3862 } 3863 3864 static void test_can_verify(void) 3865 { 3866 if (!test_can_verify_check("%d %d", 0, 1)) { 3867 pr_info("trace event string verifier disabled\n"); 3868 static_branch_inc(&trace_no_verify); 3869 } 3870 } 3871 3872 /** 3873 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer 3874 * @iter: The iterator that holds the seq buffer and the event being printed 3875 * @fmt: The format used to print the event 3876 * @ap: The va_list holding the data to print from @fmt. 3877 * 3878 * This writes the data into the @iter->seq buffer using the data from 3879 * @fmt and @ap. If the format has a %s, then the source of the string 3880 * is examined to make sure it is safe to print, otherwise it will 3881 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string 3882 * pointer. 
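 *
 * A hypothetical example of the kind of event that gets flagged
 * (illustrative only; the event name and fields are made up):
 *
 *   TRACE_EVENT(foo,
 *           TP_PROTO(const char *name),
 *           TP_ARGS(name),
 *           TP_STRUCT__entry(__field(const char *, name)),
 *           TP_fast_assign(__entry->name = name;),
 *           TP_printk("%s", __entry->name)
 *   );
 *
 * Only the pointer is recorded in the event; by the time the buffer is
 * read, the string it points to may be long gone.  Copying the string
 * into the event with __string()/__assign_str()/__get_str() avoids the
 * problem.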
3883 */ 3884 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt, 3885 va_list ap) 3886 { 3887 const char *p = fmt; 3888 const char *str; 3889 int i, j; 3890 3891 if (WARN_ON_ONCE(!fmt)) 3892 return; 3893 3894 if (static_branch_unlikely(&trace_no_verify)) 3895 goto print; 3896 3897 /* Don't bother checking when doing a ftrace_dump() */ 3898 if (iter->fmt == static_fmt_buf) 3899 goto print; 3900 3901 while (*p) { 3902 bool star = false; 3903 int len = 0; 3904 3905 j = 0; 3906 3907 /* We only care about %s and variants */ 3908 for (i = 0; p[i]; i++) { 3909 if (i + 1 >= iter->fmt_size) { 3910 /* 3911 * If we can't expand the copy buffer, 3912 * just print it. 3913 */ 3914 if (!trace_iter_expand_format(iter)) 3915 goto print; 3916 } 3917 3918 if (p[i] == '\\' && p[i+1]) { 3919 i++; 3920 continue; 3921 } 3922 if (p[i] == '%') { 3923 /* Need to test cases like %08.*s */ 3924 for (j = 1; p[i+j]; j++) { 3925 if (isdigit(p[i+j]) || 3926 p[i+j] == '.') 3927 continue; 3928 if (p[i+j] == '*') { 3929 star = true; 3930 continue; 3931 } 3932 break; 3933 } 3934 if (p[i+j] == 's') 3935 break; 3936 star = false; 3937 } 3938 j = 0; 3939 } 3940 /* If no %s found then just print normally */ 3941 if (!p[i]) 3942 break; 3943 3944 /* Copy up to the %s, and print that */ 3945 strncpy(iter->fmt, p, i); 3946 iter->fmt[i] = '\0'; 3947 trace_seq_vprintf(&iter->seq, iter->fmt, ap); 3948 3949 /* 3950 * If iter->seq is full, the above call no longer guarantees 3951 * that ap is in sync with fmt processing, and further calls 3952 * to va_arg() can return wrong positional arguments. 3953 * 3954 * Ensure that ap is no longer used in this case. 3955 */ 3956 if (iter->seq.full) { 3957 p = ""; 3958 break; 3959 } 3960 3961 if (star) 3962 len = va_arg(ap, int); 3963 3964 /* The ap now points to the string data of the %s */ 3965 str = va_arg(ap, const char *); 3966 3967 /* 3968 * If you hit this warning, it is likely that the 3969 * trace event in question used %s on a string that 3970 * was saved at the time of the event, but may not be 3971 * around when the trace is read. Use __string(), 3972 * __assign_str() and __get_str() helpers in the TRACE_EVENT() 3973 * instead. See samples/trace_events/trace-events-sample.h 3974 * for reference. 
3975 */ 3976 if (WARN_ONCE(!trace_safe_str(iter, str, star, len), 3977 "fmt: '%s' current_buffer: '%s'", 3978 fmt, show_buffer(&iter->seq))) { 3979 int ret; 3980 3981 /* Try to safely read the string */ 3982 if (star) { 3983 if (len + 1 > iter->fmt_size) 3984 len = iter->fmt_size - 1; 3985 if (len < 0) 3986 len = 0; 3987 ret = copy_from_kernel_nofault(iter->fmt, str, len); 3988 iter->fmt[len] = 0; 3989 star = false; 3990 } else { 3991 ret = strncpy_from_kernel_nofault(iter->fmt, str, 3992 iter->fmt_size); 3993 } 3994 if (ret < 0) 3995 trace_seq_printf(&iter->seq, "(0x%px)", str); 3996 else 3997 trace_seq_printf(&iter->seq, "(0x%px:%s)", 3998 str, iter->fmt); 3999 str = "[UNSAFE-MEMORY]"; 4000 strcpy(iter->fmt, "%s"); 4001 } else { 4002 strncpy(iter->fmt, p + i, j + 1); 4003 iter->fmt[j+1] = '\0'; 4004 } 4005 if (star) 4006 trace_seq_printf(&iter->seq, iter->fmt, len, str); 4007 else 4008 trace_seq_printf(&iter->seq, iter->fmt, str); 4009 4010 p += i + j + 1; 4011 } 4012 print: 4013 if (*p) 4014 trace_seq_vprintf(&iter->seq, p, ap); 4015 } 4016 4017 const char *trace_event_format(struct trace_iterator *iter, const char *fmt) 4018 { 4019 const char *p, *new_fmt; 4020 char *q; 4021 4022 if (WARN_ON_ONCE(!fmt)) 4023 return fmt; 4024 4025 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) 4026 return fmt; 4027 4028 p = fmt; 4029 new_fmt = q = iter->fmt; 4030 while (*p) { 4031 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { 4032 if (!trace_iter_expand_format(iter)) 4033 return fmt; 4034 4035 q += iter->fmt - new_fmt; 4036 new_fmt = iter->fmt; 4037 } 4038 4039 *q++ = *p++; 4040 4041 /* Replace %p with %px */ 4042 if (p[-1] == '%') { 4043 if (p[0] == '%') { 4044 *q++ = *p++; 4045 } else if (p[0] == 'p' && !isalnum(p[1])) { 4046 *q++ = *p++; 4047 *q++ = 'x'; 4048 } 4049 } 4050 } 4051 *q = '\0'; 4052 4053 return new_fmt; 4054 } 4055 4056 #define STATIC_TEMP_BUF_SIZE 128 4057 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); 4058 4059 /* Find the next real entry, without updating the iterator itself */ 4060 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 4061 int *ent_cpu, u64 *ent_ts) 4062 { 4063 /* __find_next_entry will reset ent_size */ 4064 int ent_size = iter->ent_size; 4065 struct trace_entry *entry; 4066 4067 /* 4068 * If called from ftrace_dump(), then the iter->temp buffer 4069 * will be the static_temp_buf and not created from kmalloc. 4070 * If the entry size is greater than the buffer, we can 4071 * not save it. Just return NULL in that case. This is only 4072 * used to add markers when two consecutive events' time 4073 * stamps have a large delta. See trace_print_lat_context() 4074 */ 4075 if (iter->temp == static_temp_buf && 4076 STATIC_TEMP_BUF_SIZE < ent_size) 4077 return NULL; 4078 4079 /* 4080 * The __find_next_entry() may call peek_next_entry(), which may 4081 * call ring_buffer_peek() that may make the contents of iter->ent 4082 * undefined. Need to copy iter->ent now. 
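 *
 * (iter->temp is grown on demand with kmalloc(); when running from
 * ftrace_dump() it is the fixed static_temp_buf and cannot grow, which
 * is why the size check above gives up early in that case.)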
4083 */ 4084 if (iter->ent && iter->ent != iter->temp) { 4085 if ((!iter->temp || iter->temp_size < iter->ent_size) && 4086 !WARN_ON_ONCE(iter->temp == static_temp_buf)) { 4087 void *temp; 4088 temp = kmalloc(iter->ent_size, GFP_KERNEL); 4089 if (!temp) 4090 return NULL; 4091 kfree(iter->temp); 4092 iter->temp = temp; 4093 iter->temp_size = iter->ent_size; 4094 } 4095 memcpy(iter->temp, iter->ent, iter->ent_size); 4096 iter->ent = iter->temp; 4097 } 4098 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); 4099 /* Put back the original ent_size */ 4100 iter->ent_size = ent_size; 4101 4102 return entry; 4103 } 4104 4105 /* Find the next real entry, and increment the iterator to the next entry */ 4106 void *trace_find_next_entry_inc(struct trace_iterator *iter) 4107 { 4108 iter->ent = __find_next_entry(iter, &iter->cpu, 4109 &iter->lost_events, &iter->ts); 4110 4111 if (iter->ent) 4112 trace_iterator_increment(iter); 4113 4114 return iter->ent ? iter : NULL; 4115 } 4116 4117 static void trace_consume(struct trace_iterator *iter) 4118 { 4119 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, 4120 &iter->lost_events); 4121 } 4122 4123 static void *s_next(struct seq_file *m, void *v, loff_t *pos) 4124 { 4125 struct trace_iterator *iter = m->private; 4126 int i = (int)*pos; 4127 void *ent; 4128 4129 WARN_ON_ONCE(iter->leftover); 4130 4131 (*pos)++; 4132 4133 /* can't go backwards */ 4134 if (iter->idx > i) 4135 return NULL; 4136 4137 if (iter->idx < 0) 4138 ent = trace_find_next_entry_inc(iter); 4139 else 4140 ent = iter; 4141 4142 while (ent && iter->idx < i) 4143 ent = trace_find_next_entry_inc(iter); 4144 4145 iter->pos = *pos; 4146 4147 return ent; 4148 } 4149 4150 void tracing_iter_reset(struct trace_iterator *iter, int cpu) 4151 { 4152 struct ring_buffer_iter *buf_iter; 4153 unsigned long entries = 0; 4154 u64 ts; 4155 4156 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; 4157 4158 buf_iter = trace_buffer_iter(iter, cpu); 4159 if (!buf_iter) 4160 return; 4161 4162 ring_buffer_iter_reset(buf_iter); 4163 4164 /* 4165 * We could have the case with the max latency tracers 4166 * that a reset never took place on a cpu. This is evident 4167 * by the timestamp being before the start of the buffer. 4168 */ 4169 while (ring_buffer_iter_peek(buf_iter, &ts)) { 4170 if (ts >= iter->array_buffer->time_start) 4171 break; 4172 entries++; 4173 ring_buffer_iter_advance(buf_iter); 4174 } 4175 4176 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; 4177 } 4178 4179 /* 4180 * The current tracer is copied to avoid a global locking 4181 * all around. 
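 * If the current tracer has changed since the iterator was set up,
 * s_start() below closes the old iter->trace and opens the new
 * tr->current_trace under trace_types_lock.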
4182 */ 4183 static void *s_start(struct seq_file *m, loff_t *pos) 4184 { 4185 struct trace_iterator *iter = m->private; 4186 struct trace_array *tr = iter->tr; 4187 int cpu_file = iter->cpu_file; 4188 void *p = NULL; 4189 loff_t l = 0; 4190 int cpu; 4191 4192 mutex_lock(&trace_types_lock); 4193 if (unlikely(tr->current_trace != iter->trace)) { 4194 /* Close iter->trace before switching to the new current tracer */ 4195 if (iter->trace->close) 4196 iter->trace->close(iter); 4197 iter->trace = tr->current_trace; 4198 /* Reopen the new current tracer */ 4199 if (iter->trace->open) 4200 iter->trace->open(iter); 4201 } 4202 mutex_unlock(&trace_types_lock); 4203 4204 #ifdef CONFIG_TRACER_MAX_TRACE 4205 if (iter->snapshot && iter->trace->use_max_tr) 4206 return ERR_PTR(-EBUSY); 4207 #endif 4208 4209 if (*pos != iter->pos) { 4210 iter->ent = NULL; 4211 iter->cpu = 0; 4212 iter->idx = -1; 4213 4214 if (cpu_file == RING_BUFFER_ALL_CPUS) { 4215 for_each_tracing_cpu(cpu) 4216 tracing_iter_reset(iter, cpu); 4217 } else 4218 tracing_iter_reset(iter, cpu_file); 4219 4220 iter->leftover = 0; 4221 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 4222 ; 4223 4224 } else { 4225 /* 4226 * If we overflowed the seq_file before, then we want 4227 * to just reuse the trace_seq buffer again. 4228 */ 4229 if (iter->leftover) 4230 p = iter; 4231 else { 4232 l = *pos - 1; 4233 p = s_next(m, p, &l); 4234 } 4235 } 4236 4237 trace_event_read_lock(); 4238 trace_access_lock(cpu_file); 4239 return p; 4240 } 4241 4242 static void s_stop(struct seq_file *m, void *p) 4243 { 4244 struct trace_iterator *iter = m->private; 4245 4246 #ifdef CONFIG_TRACER_MAX_TRACE 4247 if (iter->snapshot && iter->trace->use_max_tr) 4248 return; 4249 #endif 4250 4251 trace_access_unlock(iter->cpu_file); 4252 trace_event_read_unlock(); 4253 } 4254 4255 static void 4256 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, 4257 unsigned long *entries, int cpu) 4258 { 4259 unsigned long count; 4260 4261 count = ring_buffer_entries_cpu(buf->buffer, cpu); 4262 /* 4263 * If this buffer has skipped entries, then we hold all 4264 * entries for the trace and we need to ignore the 4265 * ones before the time stamp. 
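 * In that case the skipped entries are subtracted from the count and the
 * ring buffer overrun count is not added to the total.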
4266 */ 4267 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 4268 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 4269 /* total is the same as the entries */ 4270 *total = count; 4271 } else 4272 *total = count + 4273 ring_buffer_overrun_cpu(buf->buffer, cpu); 4274 *entries = count; 4275 } 4276 4277 static void 4278 get_total_entries(struct array_buffer *buf, 4279 unsigned long *total, unsigned long *entries) 4280 { 4281 unsigned long t, e; 4282 int cpu; 4283 4284 *total = 0; 4285 *entries = 0; 4286 4287 for_each_tracing_cpu(cpu) { 4288 get_total_entries_cpu(buf, &t, &e, cpu); 4289 *total += t; 4290 *entries += e; 4291 } 4292 } 4293 4294 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 4295 { 4296 unsigned long total, entries; 4297 4298 if (!tr) 4299 tr = &global_trace; 4300 4301 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); 4302 4303 return entries; 4304 } 4305 4306 unsigned long trace_total_entries(struct trace_array *tr) 4307 { 4308 unsigned long total, entries; 4309 4310 if (!tr) 4311 tr = &global_trace; 4312 4313 get_total_entries(&tr->array_buffer, &total, &entries); 4314 4315 return entries; 4316 } 4317 4318 static void print_lat_help_header(struct seq_file *m) 4319 { 4320 seq_puts(m, "# _------=> CPU# \n" 4321 "# / _-----=> irqs-off/BH-disabled\n" 4322 "# | / _----=> need-resched \n" 4323 "# || / _---=> hardirq/softirq \n" 4324 "# ||| / _--=> preempt-depth \n" 4325 "# |||| / _-=> migrate-disable \n" 4326 "# ||||| / delay \n" 4327 "# cmd pid |||||| time | caller \n" 4328 "# \\ / |||||| \\ | / \n"); 4329 } 4330 4331 static void print_event_info(struct array_buffer *buf, struct seq_file *m) 4332 { 4333 unsigned long total; 4334 unsigned long entries; 4335 4336 get_total_entries(buf, &total, &entries); 4337 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", 4338 entries, total, num_online_cpus()); 4339 seq_puts(m, "#\n"); 4340 } 4341 4342 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, 4343 unsigned int flags) 4344 { 4345 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4346 4347 print_event_info(buf, m); 4348 4349 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); 4350 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); 4351 } 4352 4353 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, 4354 unsigned int flags) 4355 { 4356 bool tgid = flags & TRACE_ITER_RECORD_TGID; 4357 static const char space[] = " "; 4358 int prec = tgid ? 
12 : 2; 4359 4360 print_event_info(buf, m); 4361 4362 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); 4363 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 4364 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 4365 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 4366 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); 4367 seq_printf(m, "# %.*s|||| / delay\n", prec, space); 4368 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); 4369 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); 4370 } 4371 4372 void 4373 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 4374 { 4375 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 4376 struct array_buffer *buf = iter->array_buffer; 4377 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 4378 struct tracer *type = iter->trace; 4379 unsigned long entries; 4380 unsigned long total; 4381 const char *name = type->name; 4382 4383 get_total_entries(buf, &total, &entries); 4384 4385 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 4386 name, UTS_RELEASE); 4387 seq_puts(m, "# -----------------------------------" 4388 "---------------------------------\n"); 4389 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" 4390 " (M:%s VP:%d, KP:%d, SP:%d HP:%d", 4391 nsecs_to_usecs(data->saved_latency), 4392 entries, 4393 total, 4394 buf->cpu, 4395 preempt_model_none() ? "server" : 4396 preempt_model_voluntary() ? "desktop" : 4397 preempt_model_full() ? "preempt" : 4398 preempt_model_rt() ? "preempt_rt" : 4399 "unknown", 4400 /* These are reserved for later use */ 4401 0, 0, 0, 0); 4402 #ifdef CONFIG_SMP 4403 seq_printf(m, " #P:%d)\n", num_online_cpus()); 4404 #else 4405 seq_puts(m, ")\n"); 4406 #endif 4407 seq_puts(m, "# -----------------\n"); 4408 seq_printf(m, "# | task: %.16s-%d " 4409 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", 4410 data->comm, data->pid, 4411 from_kuid_munged(seq_user_ns(m), data->uid), data->nice, 4412 data->policy, data->rt_priority); 4413 seq_puts(m, "# -----------------\n"); 4414 4415 if (data->critical_start) { 4416 seq_puts(m, "# => started at: "); 4417 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); 4418 trace_print_seq(m, &iter->seq); 4419 seq_puts(m, "\n# => ended at: "); 4420 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 4421 trace_print_seq(m, &iter->seq); 4422 seq_puts(m, "\n#\n"); 4423 } 4424 4425 seq_puts(m, "#\n"); 4426 } 4427 4428 static void test_cpu_buff_start(struct trace_iterator *iter) 4429 { 4430 struct trace_seq *s = &iter->seq; 4431 struct trace_array *tr = iter->tr; 4432 4433 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 4434 return; 4435 4436 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 4437 return; 4438 4439 if (cpumask_available(iter->started) && 4440 cpumask_test_cpu(iter->cpu, iter->started)) 4441 return; 4442 4443 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) 4444 return; 4445 4446 if (cpumask_available(iter->started)) 4447 cpumask_set_cpu(iter->cpu, iter->started); 4448 4449 /* Don't print started cpu buffer for the first entry of the trace */ 4450 if (iter->idx > 1) 4451 trace_seq_printf(s, "##### CPU %u buffer started ####\n", 4452 iter->cpu); 4453 } 4454 4455 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 4456 { 4457 struct trace_array *tr = iter->tr; 4458 struct trace_seq *s = &iter->seq; 4459 unsigned long sym_flags = (tr->trace_flags & 
TRACE_ITER_SYM_MASK); 4460 struct trace_entry *entry; 4461 struct trace_event *event; 4462 4463 entry = iter->ent; 4464 4465 test_cpu_buff_start(iter); 4466 4467 event = ftrace_find_event(entry->type); 4468 4469 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4470 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4471 trace_print_lat_context(iter); 4472 else 4473 trace_print_context(iter); 4474 } 4475 4476 if (trace_seq_has_overflowed(s)) 4477 return TRACE_TYPE_PARTIAL_LINE; 4478 4479 if (event) { 4480 if (tr->trace_flags & TRACE_ITER_FIELDS) 4481 return print_event_fields(iter, event); 4482 return event->funcs->trace(iter, sym_flags, event); 4483 } 4484 4485 trace_seq_printf(s, "Unknown type %d\n", entry->type); 4486 4487 return trace_handle_return(s); 4488 } 4489 4490 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 4491 { 4492 struct trace_array *tr = iter->tr; 4493 struct trace_seq *s = &iter->seq; 4494 struct trace_entry *entry; 4495 struct trace_event *event; 4496 4497 entry = iter->ent; 4498 4499 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 4500 trace_seq_printf(s, "%d %d %llu ", 4501 entry->pid, iter->cpu, iter->ts); 4502 4503 if (trace_seq_has_overflowed(s)) 4504 return TRACE_TYPE_PARTIAL_LINE; 4505 4506 event = ftrace_find_event(entry->type); 4507 if (event) 4508 return event->funcs->raw(iter, 0, event); 4509 4510 trace_seq_printf(s, "%d ?\n", entry->type); 4511 4512 return trace_handle_return(s); 4513 } 4514 4515 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 4516 { 4517 struct trace_array *tr = iter->tr; 4518 struct trace_seq *s = &iter->seq; 4519 unsigned char newline = '\n'; 4520 struct trace_entry *entry; 4521 struct trace_event *event; 4522 4523 entry = iter->ent; 4524 4525 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4526 SEQ_PUT_HEX_FIELD(s, entry->pid); 4527 SEQ_PUT_HEX_FIELD(s, iter->cpu); 4528 SEQ_PUT_HEX_FIELD(s, iter->ts); 4529 if (trace_seq_has_overflowed(s)) 4530 return TRACE_TYPE_PARTIAL_LINE; 4531 } 4532 4533 event = ftrace_find_event(entry->type); 4534 if (event) { 4535 enum print_line_t ret = event->funcs->hex(iter, 0, event); 4536 if (ret != TRACE_TYPE_HANDLED) 4537 return ret; 4538 } 4539 4540 SEQ_PUT_FIELD(s, newline); 4541 4542 return trace_handle_return(s); 4543 } 4544 4545 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 4546 { 4547 struct trace_array *tr = iter->tr; 4548 struct trace_seq *s = &iter->seq; 4549 struct trace_entry *entry; 4550 struct trace_event *event; 4551 4552 entry = iter->ent; 4553 4554 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 4555 SEQ_PUT_FIELD(s, entry->pid); 4556 SEQ_PUT_FIELD(s, iter->cpu); 4557 SEQ_PUT_FIELD(s, iter->ts); 4558 if (trace_seq_has_overflowed(s)) 4559 return TRACE_TYPE_PARTIAL_LINE; 4560 } 4561 4562 event = ftrace_find_event(entry->type); 4563 return event ? 
event->funcs->binary(iter, 0, event) : 4564 TRACE_TYPE_HANDLED; 4565 } 4566 4567 int trace_empty(struct trace_iterator *iter) 4568 { 4569 struct ring_buffer_iter *buf_iter; 4570 int cpu; 4571 4572 /* If we are looking at one CPU buffer, only check that one */ 4573 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 4574 cpu = iter->cpu_file; 4575 buf_iter = trace_buffer_iter(iter, cpu); 4576 if (buf_iter) { 4577 if (!ring_buffer_iter_empty(buf_iter)) 4578 return 0; 4579 } else { 4580 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4581 return 0; 4582 } 4583 return 1; 4584 } 4585 4586 for_each_tracing_cpu(cpu) { 4587 buf_iter = trace_buffer_iter(iter, cpu); 4588 if (buf_iter) { 4589 if (!ring_buffer_iter_empty(buf_iter)) 4590 return 0; 4591 } else { 4592 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) 4593 return 0; 4594 } 4595 } 4596 4597 return 1; 4598 } 4599 4600 /* Called with trace_event_read_lock() held. */ 4601 enum print_line_t print_trace_line(struct trace_iterator *iter) 4602 { 4603 struct trace_array *tr = iter->tr; 4604 unsigned long trace_flags = tr->trace_flags; 4605 enum print_line_t ret; 4606 4607 if (iter->lost_events) { 4608 if (iter->lost_events == (unsigned long)-1) 4609 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n", 4610 iter->cpu); 4611 else 4612 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", 4613 iter->cpu, iter->lost_events); 4614 if (trace_seq_has_overflowed(&iter->seq)) 4615 return TRACE_TYPE_PARTIAL_LINE; 4616 } 4617 4618 if (iter->trace && iter->trace->print_line) { 4619 ret = iter->trace->print_line(iter); 4620 if (ret != TRACE_TYPE_UNHANDLED) 4621 return ret; 4622 } 4623 4624 if (iter->ent->type == TRACE_BPUTS && 4625 trace_flags & TRACE_ITER_PRINTK && 4626 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4627 return trace_print_bputs_msg_only(iter); 4628 4629 if (iter->ent->type == TRACE_BPRINT && 4630 trace_flags & TRACE_ITER_PRINTK && 4631 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4632 return trace_print_bprintk_msg_only(iter); 4633 4634 if (iter->ent->type == TRACE_PRINT && 4635 trace_flags & TRACE_ITER_PRINTK && 4636 trace_flags & TRACE_ITER_PRINTK_MSGONLY) 4637 return trace_print_printk_msg_only(iter); 4638 4639 if (trace_flags & TRACE_ITER_BIN) 4640 return print_bin_fmt(iter); 4641 4642 if (trace_flags & TRACE_ITER_HEX) 4643 return print_hex_fmt(iter); 4644 4645 if (trace_flags & TRACE_ITER_RAW) 4646 return print_raw_fmt(iter); 4647 4648 return print_trace_fmt(iter); 4649 } 4650 4651 void trace_latency_header(struct seq_file *m) 4652 { 4653 struct trace_iterator *iter = m->private; 4654 struct trace_array *tr = iter->tr; 4655 4656 /* print nothing if the buffers are empty */ 4657 if (trace_empty(iter)) 4658 return; 4659 4660 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 4661 print_trace_header(m, iter); 4662 4663 if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 4664 print_lat_help_header(m); 4665 } 4666 4667 void trace_default_header(struct seq_file *m) 4668 { 4669 struct trace_iterator *iter = m->private; 4670 struct trace_array *tr = iter->tr; 4671 unsigned long trace_flags = tr->trace_flags; 4672 4673 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 4674 return; 4675 4676 if (iter->iter_flags & TRACE_FILE_LAT_FMT) { 4677 /* print nothing if the buffers are empty */ 4678 if (trace_empty(iter)) 4679 return; 4680 print_trace_header(m, iter); 4681 if (!(trace_flags & TRACE_ITER_VERBOSE)) 4682 print_lat_help_header(m); 4683 } else { 4684 if (!(trace_flags & TRACE_ITER_VERBOSE)) { 4685 if (trace_flags & TRACE_ITER_IRQ_INFO) 4686 
print_func_help_header_irq(iter->array_buffer, 4687 m, trace_flags); 4688 else 4689 print_func_help_header(iter->array_buffer, m, 4690 trace_flags); 4691 } 4692 } 4693 } 4694 4695 static void test_ftrace_alive(struct seq_file *m) 4696 { 4697 if (!ftrace_is_dead()) 4698 return; 4699 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" 4700 "# MAY BE MISSING FUNCTION EVENTS\n"); 4701 } 4702 4703 #ifdef CONFIG_TRACER_MAX_TRACE 4704 static void show_snapshot_main_help(struct seq_file *m) 4705 { 4706 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" 4707 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4708 "# Takes a snapshot of the main buffer.\n" 4709 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" 4710 "# (Doesn't have to be '2' works with any number that\n" 4711 "# is not a '0' or '1')\n"); 4712 } 4713 4714 static void show_snapshot_percpu_help(struct seq_file *m) 4715 { 4716 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); 4717 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP 4718 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" 4719 "# Takes a snapshot of the main buffer for this cpu.\n"); 4720 #else 4721 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" 4722 "# Must use main snapshot file to allocate.\n"); 4723 #endif 4724 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" 4725 "# (Doesn't have to be '2' works with any number that\n" 4726 "# is not a '0' or '1')\n"); 4727 } 4728 4729 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) 4730 { 4731 if (iter->tr->allocated_snapshot) 4732 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); 4733 else 4734 seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); 4735 4736 seq_puts(m, "# Snapshot commands:\n"); 4737 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 4738 show_snapshot_main_help(m); 4739 else 4740 show_snapshot_percpu_help(m); 4741 } 4742 #else 4743 /* Should never be called */ 4744 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } 4745 #endif 4746 4747 static int s_show(struct seq_file *m, void *v) 4748 { 4749 struct trace_iterator *iter = v; 4750 int ret; 4751 4752 if (iter->ent == NULL) { 4753 if (iter->tr) { 4754 seq_printf(m, "# tracer: %s\n", iter->trace->name); 4755 seq_puts(m, "#\n"); 4756 test_ftrace_alive(m); 4757 } 4758 if (iter->snapshot && trace_empty(iter)) 4759 print_snapshot_help(m, iter); 4760 else if (iter->trace && iter->trace->print_header) 4761 iter->trace->print_header(m); 4762 else 4763 trace_default_header(m); 4764 4765 } else if (iter->leftover) { 4766 /* 4767 * If we filled the seq_file buffer earlier, we 4768 * want to just show it now. 4769 */ 4770 ret = trace_print_seq(m, &iter->seq); 4771 4772 /* ret should this time be zero, but you never know */ 4773 iter->leftover = ret; 4774 4775 } else { 4776 print_trace_line(iter); 4777 ret = trace_print_seq(m, &iter->seq); 4778 /* 4779 * If we overflow the seq_file buffer, then it will 4780 * ask us for this data again at start up. 4781 * Use that instead. 4782 * ret is 0 if seq_file write succeeded. 4783 * -1 otherwise. 4784 */ 4785 iter->leftover = ret; 4786 } 4787 4788 return 0; 4789 } 4790 4791 /* 4792 * Should be used after trace_array_get(), trace_types_lock 4793 * ensures that i_cdev was already initialized. 
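 * Per-CPU files store cpu + 1 in i_cdev (see trace_create_cpu_file()),
 * so a zero i_cdev means the file is not bound to a single CPU and
 * RING_BUFFER_ALL_CPUS is returned.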
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

static const struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

/*
 * Note, as iter itself can be allocated and freed in different
 * ways, this function is only used to free its content, and not
 * the iterator itself. The only requirement for all the allocations
 * is that they zero all fields (kzalloc), as freeing works with
 * either allocated content or NULL.
 */
static void free_trace_iter_content(struct trace_iterator *iter)
{
	/* The fmt is either NULL, allocated or points to static_fmt_buf */
	if (iter->fmt != static_fmt_buf)
		kfree(iter->fmt);

	kfree(iter->temp);
	kfree(iter->buffer_iter);
	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
}

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * trace_find_next_entry() may need to save off iter->ent.
	 * It will place it into the iter->temp buffer. As most
	 * events are less than 128, allocate a buffer of that size.
	 * If one is greater, then trace_find_next_entry() will
	 * allocate a new buffer to adjust for the bigger iter->ent.
	 * It's not critical if it fails to get allocated here.
	 */
	iter->temp = kmalloc(128, GFP_KERNEL);
	if (iter->temp)
		iter->temp_size = 128;

	/*
	 * trace_event_printf() may need to modify the given format
	 * string to replace %p with %px so that it shows the real address
	 * instead of a hash value. However, that is only needed for event
	 * tracing; other tracers may not need it. Defer the allocation
	 * until it is needed.
	 */
	iter->fmt = NULL;
	iter->fmt_size = 0;

	mutex_lock(&trace_types_lock);
	iter->trace = tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->array_buffer = &tr->max_buffer;
	else
#endif
		iter->array_buffer = &tr->array_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds.
*/ 4898 if (trace_clocks[tr->clock_id].in_ns) 4899 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 4900 4901 /* 4902 * If pause-on-trace is enabled, then stop the trace while 4903 * dumping, unless this is the "snapshot" file 4904 */ 4905 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) 4906 tracing_stop_tr(tr); 4907 4908 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 4909 for_each_tracing_cpu(cpu) { 4910 iter->buffer_iter[cpu] = 4911 ring_buffer_read_prepare(iter->array_buffer->buffer, 4912 cpu, GFP_KERNEL); 4913 } 4914 ring_buffer_read_prepare_sync(); 4915 for_each_tracing_cpu(cpu) { 4916 ring_buffer_read_start(iter->buffer_iter[cpu]); 4917 tracing_iter_reset(iter, cpu); 4918 } 4919 } else { 4920 cpu = iter->cpu_file; 4921 iter->buffer_iter[cpu] = 4922 ring_buffer_read_prepare(iter->array_buffer->buffer, 4923 cpu, GFP_KERNEL); 4924 ring_buffer_read_prepare_sync(); 4925 ring_buffer_read_start(iter->buffer_iter[cpu]); 4926 tracing_iter_reset(iter, cpu); 4927 } 4928 4929 mutex_unlock(&trace_types_lock); 4930 4931 return iter; 4932 4933 fail: 4934 mutex_unlock(&trace_types_lock); 4935 free_trace_iter_content(iter); 4936 release: 4937 seq_release_private(inode, file); 4938 return ERR_PTR(-ENOMEM); 4939 } 4940 4941 int tracing_open_generic(struct inode *inode, struct file *filp) 4942 { 4943 int ret; 4944 4945 ret = tracing_check_open_get_tr(NULL); 4946 if (ret) 4947 return ret; 4948 4949 filp->private_data = inode->i_private; 4950 return 0; 4951 } 4952 4953 bool tracing_is_disabled(void) 4954 { 4955 return (tracing_disabled) ? true: false; 4956 } 4957 4958 /* 4959 * Open and update trace_array ref count. 4960 * Must have the current trace_array passed to it. 4961 */ 4962 int tracing_open_generic_tr(struct inode *inode, struct file *filp) 4963 { 4964 struct trace_array *tr = inode->i_private; 4965 int ret; 4966 4967 ret = tracing_check_open_get_tr(tr); 4968 if (ret) 4969 return ret; 4970 4971 filp->private_data = inode->i_private; 4972 4973 return 0; 4974 } 4975 4976 static int tracing_mark_open(struct inode *inode, struct file *filp) 4977 { 4978 stream_open(inode, filp); 4979 return tracing_open_generic_tr(inode, filp); 4980 } 4981 4982 static int tracing_release(struct inode *inode, struct file *file) 4983 { 4984 struct trace_array *tr = inode->i_private; 4985 struct seq_file *m = file->private_data; 4986 struct trace_iterator *iter; 4987 int cpu; 4988 4989 if (!(file->f_mode & FMODE_READ)) { 4990 trace_array_put(tr); 4991 return 0; 4992 } 4993 4994 /* Writes do not use seq_file */ 4995 iter = m->private; 4996 mutex_lock(&trace_types_lock); 4997 4998 for_each_tracing_cpu(cpu) { 4999 if (iter->buffer_iter[cpu]) 5000 ring_buffer_read_finish(iter->buffer_iter[cpu]); 5001 } 5002 5003 if (iter->trace && iter->trace->close) 5004 iter->trace->close(iter); 5005 5006 if (!iter->snapshot && tr->stop_count) 5007 /* reenable tracing if it was previously enabled */ 5008 tracing_start_tr(tr); 5009 5010 __trace_array_put(tr); 5011 5012 mutex_unlock(&trace_types_lock); 5013 5014 free_trace_iter_content(iter); 5015 seq_release_private(inode, file); 5016 5017 return 0; 5018 } 5019 5020 static int tracing_release_generic_tr(struct inode *inode, struct file *file) 5021 { 5022 struct trace_array *tr = inode->i_private; 5023 5024 trace_array_put(tr); 5025 return 0; 5026 } 5027 5028 static int tracing_single_release_tr(struct inode *inode, struct file *file) 5029 { 5030 struct trace_array *tr = inode->i_private; 5031 5032 trace_array_put(tr); 5033 5034 return single_release(inode, file); 5035 } 5036 
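/*
 * Open handler for the "trace" file. Opening with write access and
 * O_TRUNC erases the selected buffer contents (one CPU or all CPUs),
 * and opening with read access builds the full iterator via
 * __tracing_open().
 */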
5037 static int tracing_open(struct inode *inode, struct file *file) 5038 { 5039 struct trace_array *tr = inode->i_private; 5040 struct trace_iterator *iter; 5041 int ret; 5042 5043 ret = tracing_check_open_get_tr(tr); 5044 if (ret) 5045 return ret; 5046 5047 /* If this file was open for write, then erase contents */ 5048 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { 5049 int cpu = tracing_get_cpu(inode); 5050 struct array_buffer *trace_buf = &tr->array_buffer; 5051 5052 #ifdef CONFIG_TRACER_MAX_TRACE 5053 if (tr->current_trace->print_max) 5054 trace_buf = &tr->max_buffer; 5055 #endif 5056 5057 if (cpu == RING_BUFFER_ALL_CPUS) 5058 tracing_reset_online_cpus(trace_buf); 5059 else 5060 tracing_reset_cpu(trace_buf, cpu); 5061 } 5062 5063 if (file->f_mode & FMODE_READ) { 5064 iter = __tracing_open(inode, file, false); 5065 if (IS_ERR(iter)) 5066 ret = PTR_ERR(iter); 5067 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 5068 iter->iter_flags |= TRACE_FILE_LAT_FMT; 5069 } 5070 5071 if (ret < 0) 5072 trace_array_put(tr); 5073 5074 return ret; 5075 } 5076 5077 /* 5078 * Some tracers are not suitable for instance buffers. 5079 * A tracer is always available for the global array (toplevel) 5080 * or if it explicitly states that it is. 5081 */ 5082 static bool 5083 trace_ok_for_array(struct tracer *t, struct trace_array *tr) 5084 { 5085 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; 5086 } 5087 5088 /* Find the next tracer that this trace array may use */ 5089 static struct tracer * 5090 get_tracer_for_array(struct trace_array *tr, struct tracer *t) 5091 { 5092 while (t && !trace_ok_for_array(t, tr)) 5093 t = t->next; 5094 5095 return t; 5096 } 5097 5098 static void * 5099 t_next(struct seq_file *m, void *v, loff_t *pos) 5100 { 5101 struct trace_array *tr = m->private; 5102 struct tracer *t = v; 5103 5104 (*pos)++; 5105 5106 if (t) 5107 t = get_tracer_for_array(tr, t->next); 5108 5109 return t; 5110 } 5111 5112 static void *t_start(struct seq_file *m, loff_t *pos) 5113 { 5114 struct trace_array *tr = m->private; 5115 struct tracer *t; 5116 loff_t l = 0; 5117 5118 mutex_lock(&trace_types_lock); 5119 5120 t = get_tracer_for_array(tr, trace_types); 5121 for (; t && l < *pos; t = t_next(m, t, &l)) 5122 ; 5123 5124 return t; 5125 } 5126 5127 static void t_stop(struct seq_file *m, void *p) 5128 { 5129 mutex_unlock(&trace_types_lock); 5130 } 5131 5132 static int t_show(struct seq_file *m, void *v) 5133 { 5134 struct tracer *t = v; 5135 5136 if (!t) 5137 return 0; 5138 5139 seq_puts(m, t->name); 5140 if (t->next) 5141 seq_putc(m, ' '); 5142 else 5143 seq_putc(m, '\n'); 5144 5145 return 0; 5146 } 5147 5148 static const struct seq_operations show_traces_seq_ops = { 5149 .start = t_start, 5150 .next = t_next, 5151 .stop = t_stop, 5152 .show = t_show, 5153 }; 5154 5155 static int show_traces_open(struct inode *inode, struct file *file) 5156 { 5157 struct trace_array *tr = inode->i_private; 5158 struct seq_file *m; 5159 int ret; 5160 5161 ret = tracing_check_open_get_tr(tr); 5162 if (ret) 5163 return ret; 5164 5165 ret = seq_open(file, &show_traces_seq_ops); 5166 if (ret) { 5167 trace_array_put(tr); 5168 return ret; 5169 } 5170 5171 m = file->private_data; 5172 m->private = tr; 5173 5174 return 0; 5175 } 5176 5177 static int show_traces_release(struct inode *inode, struct file *file) 5178 { 5179 struct trace_array *tr = inode->i_private; 5180 5181 trace_array_put(tr); 5182 return seq_release(inode, file); 5183 } 5184 5185 static ssize_t 5186 tracing_write_stub(struct file 
*filp, const char __user *ubuf, 5187 size_t count, loff_t *ppos) 5188 { 5189 return count; 5190 } 5191 5192 loff_t tracing_lseek(struct file *file, loff_t offset, int whence) 5193 { 5194 int ret; 5195 5196 if (file->f_mode & FMODE_READ) 5197 ret = seq_lseek(file, offset, whence); 5198 else 5199 file->f_pos = ret = 0; 5200 5201 return ret; 5202 } 5203 5204 static const struct file_operations tracing_fops = { 5205 .open = tracing_open, 5206 .read = seq_read, 5207 .read_iter = seq_read_iter, 5208 .splice_read = copy_splice_read, 5209 .write = tracing_write_stub, 5210 .llseek = tracing_lseek, 5211 .release = tracing_release, 5212 }; 5213 5214 static const struct file_operations show_traces_fops = { 5215 .open = show_traces_open, 5216 .read = seq_read, 5217 .llseek = seq_lseek, 5218 .release = show_traces_release, 5219 }; 5220 5221 static ssize_t 5222 tracing_cpumask_read(struct file *filp, char __user *ubuf, 5223 size_t count, loff_t *ppos) 5224 { 5225 struct trace_array *tr = file_inode(filp)->i_private; 5226 char *mask_str; 5227 int len; 5228 5229 len = snprintf(NULL, 0, "%*pb\n", 5230 cpumask_pr_args(tr->tracing_cpumask)) + 1; 5231 mask_str = kmalloc(len, GFP_KERNEL); 5232 if (!mask_str) 5233 return -ENOMEM; 5234 5235 len = snprintf(mask_str, len, "%*pb\n", 5236 cpumask_pr_args(tr->tracing_cpumask)); 5237 if (len >= count) { 5238 count = -EINVAL; 5239 goto out_err; 5240 } 5241 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); 5242 5243 out_err: 5244 kfree(mask_str); 5245 5246 return count; 5247 } 5248 5249 int tracing_set_cpumask(struct trace_array *tr, 5250 cpumask_var_t tracing_cpumask_new) 5251 { 5252 int cpu; 5253 5254 if (!tr) 5255 return -EINVAL; 5256 5257 local_irq_disable(); 5258 arch_spin_lock(&tr->max_lock); 5259 for_each_tracing_cpu(cpu) { 5260 /* 5261 * Increase/decrease the disabled counter if we are 5262 * about to flip a bit in the cpumask: 5263 */ 5264 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5265 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5266 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5267 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); 5268 #ifdef CONFIG_TRACER_MAX_TRACE 5269 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); 5270 #endif 5271 } 5272 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 5273 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 5274 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); 5275 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); 5276 #ifdef CONFIG_TRACER_MAX_TRACE 5277 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); 5278 #endif 5279 } 5280 } 5281 arch_spin_unlock(&tr->max_lock); 5282 local_irq_enable(); 5283 5284 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 5285 5286 return 0; 5287 } 5288 5289 static ssize_t 5290 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 5291 size_t count, loff_t *ppos) 5292 { 5293 struct trace_array *tr = file_inode(filp)->i_private; 5294 cpumask_var_t tracing_cpumask_new; 5295 int err; 5296 5297 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) 5298 return -ENOMEM; 5299 5300 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 5301 if (err) 5302 goto err_free; 5303 5304 err = tracing_set_cpumask(tr, tracing_cpumask_new); 5305 if (err) 5306 goto err_free; 5307 5308 free_cpumask_var(tracing_cpumask_new); 5309 5310 return count; 5311 5312 err_free: 5313 free_cpumask_var(tracing_cpumask_new); 5314 5315 return err; 5316 } 5317 5318 static const struct file_operations 
tracing_cpumask_fops = { 5319 .open = tracing_open_generic_tr, 5320 .read = tracing_cpumask_read, 5321 .write = tracing_cpumask_write, 5322 .release = tracing_release_generic_tr, 5323 .llseek = generic_file_llseek, 5324 }; 5325 5326 static int tracing_trace_options_show(struct seq_file *m, void *v) 5327 { 5328 struct tracer_opt *trace_opts; 5329 struct trace_array *tr = m->private; 5330 u32 tracer_flags; 5331 int i; 5332 5333 mutex_lock(&trace_types_lock); 5334 tracer_flags = tr->current_trace->flags->val; 5335 trace_opts = tr->current_trace->flags->opts; 5336 5337 for (i = 0; trace_options[i]; i++) { 5338 if (tr->trace_flags & (1 << i)) 5339 seq_printf(m, "%s\n", trace_options[i]); 5340 else 5341 seq_printf(m, "no%s\n", trace_options[i]); 5342 } 5343 5344 for (i = 0; trace_opts[i].name; i++) { 5345 if (tracer_flags & trace_opts[i].bit) 5346 seq_printf(m, "%s\n", trace_opts[i].name); 5347 else 5348 seq_printf(m, "no%s\n", trace_opts[i].name); 5349 } 5350 mutex_unlock(&trace_types_lock); 5351 5352 return 0; 5353 } 5354 5355 static int __set_tracer_option(struct trace_array *tr, 5356 struct tracer_flags *tracer_flags, 5357 struct tracer_opt *opts, int neg) 5358 { 5359 struct tracer *trace = tracer_flags->trace; 5360 int ret; 5361 5362 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); 5363 if (ret) 5364 return ret; 5365 5366 if (neg) 5367 tracer_flags->val &= ~opts->bit; 5368 else 5369 tracer_flags->val |= opts->bit; 5370 return 0; 5371 } 5372 5373 /* Try to assign a tracer specific option */ 5374 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) 5375 { 5376 struct tracer *trace = tr->current_trace; 5377 struct tracer_flags *tracer_flags = trace->flags; 5378 struct tracer_opt *opts = NULL; 5379 int i; 5380 5381 for (i = 0; tracer_flags->opts[i].name; i++) { 5382 opts = &tracer_flags->opts[i]; 5383 5384 if (strcmp(cmp, opts->name) == 0) 5385 return __set_tracer_option(tr, trace->flags, opts, neg); 5386 } 5387 5388 return -EINVAL; 5389 } 5390 5391 /* Some tracers require overwrite to stay enabled */ 5392 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) 5393 { 5394 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) 5395 return -1; 5396 5397 return 0; 5398 } 5399 5400 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 5401 { 5402 int *map; 5403 5404 if ((mask == TRACE_ITER_RECORD_TGID) || 5405 (mask == TRACE_ITER_RECORD_CMD)) 5406 lockdep_assert_held(&event_mutex); 5407 5408 /* do nothing if flag is already set */ 5409 if (!!(tr->trace_flags & mask) == !!enabled) 5410 return 0; 5411 5412 /* Give the tracer a chance to approve the change */ 5413 if (tr->current_trace->flag_changed) 5414 if (tr->current_trace->flag_changed(tr, mask, !!enabled)) 5415 return -EINVAL; 5416 5417 if (enabled) 5418 tr->trace_flags |= mask; 5419 else 5420 tr->trace_flags &= ~mask; 5421 5422 if (mask == TRACE_ITER_RECORD_CMD) 5423 trace_event_enable_cmd_record(enabled); 5424 5425 if (mask == TRACE_ITER_RECORD_TGID) { 5426 if (!tgid_map) { 5427 tgid_map_max = pid_max; 5428 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map), 5429 GFP_KERNEL); 5430 5431 /* 5432 * Pairs with smp_load_acquire() in 5433 * trace_find_tgid_ptr() to ensure that if it observes 5434 * the tgid_map we just allocated then it also observes 5435 * the corresponding tgid_map_max value. 
5436 */ 5437 smp_store_release(&tgid_map, map); 5438 } 5439 if (!tgid_map) { 5440 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; 5441 return -ENOMEM; 5442 } 5443 5444 trace_event_enable_tgid_record(enabled); 5445 } 5446 5447 if (mask == TRACE_ITER_EVENT_FORK) 5448 trace_event_follow_fork(tr, enabled); 5449 5450 if (mask == TRACE_ITER_FUNC_FORK) 5451 ftrace_pid_follow_fork(tr, enabled); 5452 5453 if (mask == TRACE_ITER_OVERWRITE) { 5454 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); 5455 #ifdef CONFIG_TRACER_MAX_TRACE 5456 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); 5457 #endif 5458 } 5459 5460 if (mask == TRACE_ITER_PRINTK) { 5461 trace_printk_start_stop_comm(enabled); 5462 trace_printk_control(enabled); 5463 } 5464 5465 return 0; 5466 } 5467 5468 int trace_set_options(struct trace_array *tr, char *option) 5469 { 5470 char *cmp; 5471 int neg = 0; 5472 int ret; 5473 size_t orig_len = strlen(option); 5474 int len; 5475 5476 cmp = strstrip(option); 5477 5478 len = str_has_prefix(cmp, "no"); 5479 if (len) 5480 neg = 1; 5481 5482 cmp += len; 5483 5484 mutex_lock(&event_mutex); 5485 mutex_lock(&trace_types_lock); 5486 5487 ret = match_string(trace_options, -1, cmp); 5488 /* If no option could be set, test the specific tracer options */ 5489 if (ret < 0) 5490 ret = set_tracer_option(tr, cmp, neg); 5491 else 5492 ret = set_tracer_flag(tr, 1 << ret, !neg); 5493 5494 mutex_unlock(&trace_types_lock); 5495 mutex_unlock(&event_mutex); 5496 5497 /* 5498 * If the first trailing whitespace is replaced with '\0' by strstrip, 5499 * turn it back into a space. 5500 */ 5501 if (orig_len > strlen(option)) 5502 option[strlen(option)] = ' '; 5503 5504 return ret; 5505 } 5506 5507 static void __init apply_trace_boot_options(void) 5508 { 5509 char *buf = trace_boot_options_buf; 5510 char *option; 5511 5512 while (true) { 5513 option = strsep(&buf, ","); 5514 5515 if (!option) 5516 break; 5517 5518 if (*option) 5519 trace_set_options(&global_trace, option); 5520 5521 /* Put back the comma to allow this to be called again */ 5522 if (buf) 5523 *(buf - 1) = ','; 5524 } 5525 } 5526 5527 static ssize_t 5528 tracing_trace_options_write(struct file *filp, const char __user *ubuf, 5529 size_t cnt, loff_t *ppos) 5530 { 5531 struct seq_file *m = filp->private_data; 5532 struct trace_array *tr = m->private; 5533 char buf[64]; 5534 int ret; 5535 5536 if (cnt >= sizeof(buf)) 5537 return -EINVAL; 5538 5539 if (copy_from_user(buf, ubuf, cnt)) 5540 return -EFAULT; 5541 5542 buf[cnt] = 0; 5543 5544 ret = trace_set_options(tr, buf); 5545 if (ret < 0) 5546 return ret; 5547 5548 *ppos += cnt; 5549 5550 return cnt; 5551 } 5552 5553 static int tracing_trace_options_open(struct inode *inode, struct file *file) 5554 { 5555 struct trace_array *tr = inode->i_private; 5556 int ret; 5557 5558 ret = tracing_check_open_get_tr(tr); 5559 if (ret) 5560 return ret; 5561 5562 ret = single_open(file, tracing_trace_options_show, inode->i_private); 5563 if (ret < 0) 5564 trace_array_put(tr); 5565 5566 return ret; 5567 } 5568 5569 static const struct file_operations tracing_iter_fops = { 5570 .open = tracing_trace_options_open, 5571 .read = seq_read, 5572 .llseek = seq_lseek, 5573 .release = tracing_single_release_tr, 5574 .write = tracing_trace_options_write, 5575 }; 5576 5577 static const char readme_msg[] = 5578 "tracing mini-HOWTO:\n\n" 5579 "# echo 0 > tracing_on : quick way to disable tracing\n" 5580 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" 5581 " Important files:\n" 5582 " trace\t\t\t- The 
static contents of the buffer\n" 5583 "\t\t\t To clear the buffer write into this file: echo > trace\n" 5584 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 5585 " current_tracer\t- function and latency tracers\n" 5586 " available_tracers\t- list of configured tracers for current_tracer\n" 5587 " error_log\t- error log for failed commands (that support it)\n" 5588 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 5589 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 5590 " trace_clock\t\t- change the clock used to order events\n" 5591 " local: Per cpu clock but may not be synced across CPUs\n" 5592 " global: Synced across CPUs but slows tracing down.\n" 5593 " counter: Not a clock, but just an increment\n" 5594 " uptime: Jiffy counter from time of boot\n" 5595 " perf: Same clock that perf events use\n" 5596 #ifdef CONFIG_X86_64 5597 " x86-tsc: TSC cycle counter\n" 5598 #endif 5599 "\n timestamp_mode\t- view the mode used to timestamp events\n" 5600 " delta: Delta difference against a buffer-wide timestamp\n" 5601 " absolute: Absolute (standalone) timestamp\n" 5602 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" 5603 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" 5604 " tracing_cpumask\t- Limit which CPUs to trace\n" 5605 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 5606 "\t\t\t Remove sub-buffer with rmdir\n" 5607 " trace_options\t\t- Set format or modify how tracing happens\n" 5608 "\t\t\t Disable an option by prefixing 'no' to the\n" 5609 "\t\t\t option name\n" 5610 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 5611 #ifdef CONFIG_DYNAMIC_FTRACE 5612 "\n available_filter_functions - list of functions that can be filtered on\n" 5613 " set_ftrace_filter\t- echo function name in here to only trace these\n" 5614 "\t\t\t functions\n" 5615 "\t accepts: func_full_name or glob-matching-pattern\n" 5616 "\t modules: Can select a group via module\n" 5617 "\t Format: :mod:<module-name>\n" 5618 "\t example: echo :mod:ext3 > set_ftrace_filter\n" 5619 "\t triggers: a command to perform when function is hit\n" 5620 "\t Format: <function>:<trigger>[:count]\n" 5621 "\t trigger: traceon, traceoff\n" 5622 "\t\t enable_event:<system>:<event>\n" 5623 "\t\t disable_event:<system>:<event>\n" 5624 #ifdef CONFIG_STACKTRACE 5625 "\t\t stacktrace\n" 5626 #endif 5627 #ifdef CONFIG_TRACER_SNAPSHOT 5628 "\t\t snapshot\n" 5629 #endif 5630 "\t\t dump\n" 5631 "\t\t cpudump\n" 5632 "\t example: echo do_fault:traceoff > set_ftrace_filter\n" 5633 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" 5634 "\t The first one will disable tracing every time do_fault is hit\n" 5635 "\t The second will disable tracing at most 3 times when do_trap is hit\n" 5636 "\t The first time do trap is hit and it disables tracing, the\n" 5637 "\t counter will decrement to 2. If tracing is already disabled,\n" 5638 "\t the counter will not decrement. 
It only decrements when the\n" 5639 "\t trigger did work\n" 5640 "\t To remove trigger without count:\n" 5641 "\t echo '!<function>:<trigger> > set_ftrace_filter\n" 5642 "\t To remove trigger with a count:\n" 5643 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" 5644 " set_ftrace_notrace\t- echo function name in here to never trace.\n" 5645 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" 5646 "\t modules: Can select a group via module command :mod:\n" 5647 "\t Does not accept triggers\n" 5648 #endif /* CONFIG_DYNAMIC_FTRACE */ 5649 #ifdef CONFIG_FUNCTION_TRACER 5650 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" 5651 "\t\t (function)\n" 5652 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" 5653 "\t\t (function)\n" 5654 #endif 5655 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5656 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" 5657 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" 5658 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" 5659 #endif 5660 #ifdef CONFIG_TRACER_SNAPSHOT 5661 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" 5662 "\t\t\t snapshot buffer. Read the contents for more\n" 5663 "\t\t\t information\n" 5664 #endif 5665 #ifdef CONFIG_STACK_TRACER 5666 " stack_trace\t\t- Shows the max stack trace when active\n" 5667 " stack_max_size\t- Shows current max stack size that was traced\n" 5668 "\t\t\t Write into this file to reset the max size (trigger a\n" 5669 "\t\t\t new trace)\n" 5670 #ifdef CONFIG_DYNAMIC_FTRACE 5671 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" 5672 "\t\t\t traces\n" 5673 #endif 5674 #endif /* CONFIG_STACK_TRACER */ 5675 #ifdef CONFIG_DYNAMIC_EVENTS 5676 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" 5677 "\t\t\t Write into this file to define/undefine new trace events.\n" 5678 #endif 5679 #ifdef CONFIG_KPROBE_EVENTS 5680 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" 5681 "\t\t\t Write into this file to define/undefine new trace events.\n" 5682 #endif 5683 #ifdef CONFIG_UPROBE_EVENTS 5684 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" 5685 "\t\t\t Write into this file to define/undefine new trace events.\n" 5686 #endif 5687 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ 5688 defined(CONFIG_FPROBE_EVENTS) 5689 "\t accepts: event-definitions (one definition per line)\n" 5690 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) 5691 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" 5692 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" 5693 #endif 5694 #ifdef CONFIG_FPROBE_EVENTS 5695 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" 5696 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" 5697 #endif 5698 #ifdef CONFIG_HIST_TRIGGERS 5699 "\t s:[synthetic/]<event> <field> [<field>]\n" 5700 #endif 5701 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" 5702 "\t -:[<group>/][<event>]\n" 5703 #ifdef CONFIG_KPROBE_EVENTS 5704 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" 5705 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" 5706 #endif 5707 #ifdef CONFIG_UPROBE_EVENTS 5708 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" 5709 #endif 5710 "\t args: <name>=fetcharg[:type]\n" 5711 "\t fetcharg: 
(%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
"\t <argname>[->field[->field|.field...]],\n"
#else
"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#endif
#else
"\t $stack<index>, $stack, $retval, $comm,\n"
#endif
"\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
"\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
"\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
"\t symstr, <type>\\[<array-size>\\]\n"
#ifdef CONFIG_HIST_TRIGGERS
"\t field: <stype> <name>;\n"
"\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
"\t [unsigned] char/int/long\n"
#endif
"\t efield: For event probes ('e' types), the field is one of the fields\n"
"\t of the <attached-group>/<attached-event>.\n"
#endif
" events/\t\t- Directory containing all trace event subsystems:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
" events/<system>/\t- Directory containing all trace events for <system>:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
"\t\t\t events\n"
" filter\t\t- If set, only events passing filter are traced\n"
" events/<system>/<event>/\t- Directory containing control files for\n"
"\t\t\t <event>:\n"
" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
" filter\t\t- If set, only events passing filter are traced\n"
" trigger\t\t- If set, a command to perform when event is hit\n"
"\t Format: <trigger>[:count][if <filter>]\n"
"\t trigger: traceon, traceoff\n"
"\t enable_event:<system>:<event>\n"
"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_HIST_TRIGGERS
"\t enable_hist:<system>:<event>\n"
"\t disable_hist:<system>:<event>\n"
#endif
#ifdef CONFIG_STACKTRACE
"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
"\t\t snapshot\n"
#endif
#ifdef CONFIG_HIST_TRIGGERS
"\t\t hist (see below)\n"
#endif
"\t example: echo traceoff > events/block/block_unplug/trigger\n"
"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
"\t events/block/block_unplug/trigger\n"
"\t The first disables tracing every time block_unplug is hit.\n"
"\t The second disables tracing the first 3 times block_unplug is hit.\n"
"\t The third enables the kmalloc event the first 3 times block_unplug\n"
"\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
"\t Like function triggers, the counter is only decremented if it\n"
"\t enabled or disabled tracing.\n"
"\t To remove a trigger without a count:\n"
"\t echo '!<trigger> > <system>/<event>/trigger\n"
"\t To remove a trigger with a count:\n"
"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
"\t Filters can be ignored when removing a trigger.\n"
#ifdef CONFIG_HIST_TRIGGERS
" hist trigger\t- If set, event hits are aggregated into a hash table\n"
"\t Format: hist:keys=<field1[,field2,...]>\n"
"\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
"\t [:values=<field1[,field2,...]>]\n"
"\t [:sort=<field1[,field2,...]>]\n"
"\t [:size=#entries]\n"
"\t [:pause][:continue][:clear]\n"
"\t
[:name=histname1]\n" 5786 "\t [:nohitcount]\n" 5787 "\t [:<handler>.<action>]\n" 5788 "\t [if <filter>]\n\n" 5789 "\t Note, special fields can be used as well:\n" 5790 "\t common_timestamp - to record current timestamp\n" 5791 "\t common_cpu - to record the CPU the event happened on\n" 5792 "\n" 5793 "\t A hist trigger variable can be:\n" 5794 "\t - a reference to a field e.g. x=current_timestamp,\n" 5795 "\t - a reference to another variable e.g. y=$x,\n" 5796 "\t - a numeric literal: e.g. ms_per_sec=1000,\n" 5797 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n" 5798 "\n" 5799 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" 5800 "\t multiplication(*) and division(/) operators. An operand can be either a\n" 5801 "\t variable reference, field or numeric literal.\n" 5802 "\n" 5803 "\t When a matching event is hit, an entry is added to a hash\n" 5804 "\t table using the key(s) and value(s) named, and the value of a\n" 5805 "\t sum called 'hitcount' is incremented. Keys and values\n" 5806 "\t correspond to fields in the event's format description. Keys\n" 5807 "\t can be any field, or the special string 'common_stacktrace'.\n" 5808 "\t Compound keys consisting of up to two fields can be specified\n" 5809 "\t by the 'keys' keyword. Values must correspond to numeric\n" 5810 "\t fields. Sort keys consisting of up to two fields can be\n" 5811 "\t specified using the 'sort' keyword. The sort direction can\n" 5812 "\t be modified by appending '.descending' or '.ascending' to a\n" 5813 "\t sort field. The 'size' parameter can be used to specify more\n" 5814 "\t or fewer than the default 2048 entries for the hashtable size.\n" 5815 "\t If a hist trigger is given a name using the 'name' parameter,\n" 5816 "\t its histogram data will be shared with other triggers of the\n" 5817 "\t same name, and trigger hits will update this common data.\n\n" 5818 "\t Reading the 'hist' file for the event will dump the hash\n" 5819 "\t table in its entirety to stdout. If there are multiple hist\n" 5820 "\t triggers attached to an event, there will be a table for each\n" 5821 "\t trigger in the output. The table displayed for a named\n" 5822 "\t trigger will be the same as any other instance having the\n" 5823 "\t same name. The default format used to display a given field\n" 5824 "\t can be modified by appending any of the following modifiers\n" 5825 "\t to the field name, as applicable:\n\n" 5826 "\t .hex display a number as a hex value\n" 5827 "\t .sym display an address as a symbol\n" 5828 "\t .sym-offset display an address as a symbol and offset\n" 5829 "\t .execname display a common_pid as a program name\n" 5830 "\t .syscall display a syscall id as a syscall name\n" 5831 "\t .log2 display log2 value rather than raw number\n" 5832 "\t .buckets=size display values in groups of size rather than raw number\n" 5833 "\t .usecs display a common_timestamp in microseconds\n" 5834 "\t .percent display a number of percentage value\n" 5835 "\t .graph display a bar-graph of a value\n\n" 5836 "\t The 'pause' parameter can be used to pause an existing hist\n" 5837 "\t trigger or to start a hist trigger but not log any events\n" 5838 "\t until told to do so. 
'continue' can be used to start or\n" 5839 "\t restart a paused hist trigger.\n\n" 5840 "\t The 'clear' parameter will clear the contents of a running\n" 5841 "\t hist trigger and leave its current paused/active state\n" 5842 "\t unchanged.\n\n" 5843 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" 5844 "\t raw hitcount in the histogram.\n\n" 5845 "\t The enable_hist and disable_hist triggers can be used to\n" 5846 "\t have one event conditionally start and stop another event's\n" 5847 "\t already-attached hist trigger. The syntax is analogous to\n" 5848 "\t the enable_event and disable_event triggers.\n\n" 5849 "\t Hist trigger handlers and actions are executed whenever a\n" 5850 "\t a histogram entry is added or updated. They take the form:\n\n" 5851 "\t <handler>.<action>\n\n" 5852 "\t The available handlers are:\n\n" 5853 "\t onmatch(matching.event) - invoke on addition or update\n" 5854 "\t onmax(var) - invoke if var exceeds current max\n" 5855 "\t onchange(var) - invoke action if var changes\n\n" 5856 "\t The available actions are:\n\n" 5857 "\t trace(<synthetic_event>,param list) - generate synthetic event\n" 5858 "\t save(field,...) - save current event fields\n" 5859 #ifdef CONFIG_TRACER_SNAPSHOT 5860 "\t snapshot() - snapshot the trace buffer\n\n" 5861 #endif 5862 #ifdef CONFIG_SYNTH_EVENTS 5863 " events/synthetic_events\t- Create/append/remove/show synthetic events\n" 5864 "\t Write into this file to define/undefine new synthetic events.\n" 5865 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" 5866 #endif 5867 #endif 5868 ; 5869 5870 static ssize_t 5871 tracing_readme_read(struct file *filp, char __user *ubuf, 5872 size_t cnt, loff_t *ppos) 5873 { 5874 return simple_read_from_buffer(ubuf, cnt, ppos, 5875 readme_msg, strlen(readme_msg)); 5876 } 5877 5878 static const struct file_operations tracing_readme_fops = { 5879 .open = tracing_open_generic, 5880 .read = tracing_readme_read, 5881 .llseek = generic_file_llseek, 5882 }; 5883 5884 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos) 5885 { 5886 int pid = ++(*pos); 5887 5888 return trace_find_tgid_ptr(pid); 5889 } 5890 5891 static void *saved_tgids_start(struct seq_file *m, loff_t *pos) 5892 { 5893 int pid = *pos; 5894 5895 return trace_find_tgid_ptr(pid); 5896 } 5897 5898 static void saved_tgids_stop(struct seq_file *m, void *v) 5899 { 5900 } 5901 5902 static int saved_tgids_show(struct seq_file *m, void *v) 5903 { 5904 int *entry = (int *)v; 5905 int pid = entry - tgid_map; 5906 int tgid = *entry; 5907 5908 if (tgid == 0) 5909 return SEQ_SKIP; 5910 5911 seq_printf(m, "%d %d\n", pid, tgid); 5912 return 0; 5913 } 5914 5915 static const struct seq_operations tracing_saved_tgids_seq_ops = { 5916 .start = saved_tgids_start, 5917 .stop = saved_tgids_stop, 5918 .next = saved_tgids_next, 5919 .show = saved_tgids_show, 5920 }; 5921 5922 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp) 5923 { 5924 int ret; 5925 5926 ret = tracing_check_open_get_tr(NULL); 5927 if (ret) 5928 return ret; 5929 5930 return seq_open(filp, &tracing_saved_tgids_seq_ops); 5931 } 5932 5933 5934 static const struct file_operations tracing_saved_tgids_fops = { 5935 .open = tracing_saved_tgids_open, 5936 .read = seq_read, 5937 .llseek = seq_lseek, 5938 .release = seq_release, 5939 }; 5940 5941 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos) 5942 { 5943 unsigned int *ptr = v; 5944 5945 if (*pos || m->count) 5946 ptr++; 5947 5948 (*pos)++; 
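	/* Skip slots that have no saved cmdline and return the next mapped entry */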
5949 5950 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num]; 5951 ptr++) { 5952 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP) 5953 continue; 5954 5955 return ptr; 5956 } 5957 5958 return NULL; 5959 } 5960 5961 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos) 5962 { 5963 void *v; 5964 loff_t l = 0; 5965 5966 preempt_disable(); 5967 arch_spin_lock(&trace_cmdline_lock); 5968 5969 v = &savedcmd->map_cmdline_to_pid[0]; 5970 while (l <= *pos) { 5971 v = saved_cmdlines_next(m, v, &l); 5972 if (!v) 5973 return NULL; 5974 } 5975 5976 return v; 5977 } 5978 5979 static void saved_cmdlines_stop(struct seq_file *m, void *v) 5980 { 5981 arch_spin_unlock(&trace_cmdline_lock); 5982 preempt_enable(); 5983 } 5984 5985 static int saved_cmdlines_show(struct seq_file *m, void *v) 5986 { 5987 char buf[TASK_COMM_LEN]; 5988 unsigned int *pid = v; 5989 5990 __trace_find_cmdline(*pid, buf); 5991 seq_printf(m, "%d %s\n", *pid, buf); 5992 return 0; 5993 } 5994 5995 static const struct seq_operations tracing_saved_cmdlines_seq_ops = { 5996 .start = saved_cmdlines_start, 5997 .next = saved_cmdlines_next, 5998 .stop = saved_cmdlines_stop, 5999 .show = saved_cmdlines_show, 6000 }; 6001 6002 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp) 6003 { 6004 int ret; 6005 6006 ret = tracing_check_open_get_tr(NULL); 6007 if (ret) 6008 return ret; 6009 6010 return seq_open(filp, &tracing_saved_cmdlines_seq_ops); 6011 } 6012 6013 static const struct file_operations tracing_saved_cmdlines_fops = { 6014 .open = tracing_saved_cmdlines_open, 6015 .read = seq_read, 6016 .llseek = seq_lseek, 6017 .release = seq_release, 6018 }; 6019 6020 static ssize_t 6021 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, 6022 size_t cnt, loff_t *ppos) 6023 { 6024 char buf[64]; 6025 int r; 6026 6027 preempt_disable(); 6028 arch_spin_lock(&trace_cmdline_lock); 6029 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); 6030 arch_spin_unlock(&trace_cmdline_lock); 6031 preempt_enable(); 6032 6033 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6034 } 6035 6036 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) 6037 { 6038 kfree(s->saved_cmdlines); 6039 kfree(s->map_cmdline_to_pid); 6040 kfree(s); 6041 } 6042 6043 static int tracing_resize_saved_cmdlines(unsigned int val) 6044 { 6045 struct saved_cmdlines_buffer *s, *savedcmd_temp; 6046 6047 s = kmalloc(sizeof(*s), GFP_KERNEL); 6048 if (!s) 6049 return -ENOMEM; 6050 6051 if (allocate_cmdlines_buffer(val, s) < 0) { 6052 kfree(s); 6053 return -ENOMEM; 6054 } 6055 6056 preempt_disable(); 6057 arch_spin_lock(&trace_cmdline_lock); 6058 savedcmd_temp = savedcmd; 6059 savedcmd = s; 6060 arch_spin_unlock(&trace_cmdline_lock); 6061 preempt_enable(); 6062 free_saved_cmdlines_buffer(savedcmd_temp); 6063 6064 return 0; 6065 } 6066 6067 static ssize_t 6068 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf, 6069 size_t cnt, loff_t *ppos) 6070 { 6071 unsigned long val; 6072 int ret; 6073 6074 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6075 if (ret) 6076 return ret; 6077 6078 /* must have at least 1 entry or less than PID_MAX_DEFAULT */ 6079 if (!val || val > PID_MAX_DEFAULT) 6080 return -EINVAL; 6081 6082 ret = tracing_resize_saved_cmdlines((unsigned int)val); 6083 if (ret < 0) 6084 return ret; 6085 6086 *ppos += cnt; 6087 6088 return cnt; 6089 } 6090 6091 static const struct file_operations tracing_saved_cmdlines_size_fops = { 6092 .open = tracing_open_generic, 6093 .read 
= tracing_saved_cmdlines_size_read, 6094 .write = tracing_saved_cmdlines_size_write, 6095 }; 6096 6097 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 6098 static union trace_eval_map_item * 6099 update_eval_map(union trace_eval_map_item *ptr) 6100 { 6101 if (!ptr->map.eval_string) { 6102 if (ptr->tail.next) { 6103 ptr = ptr->tail.next; 6104 /* Set ptr to the next real item (skip head) */ 6105 ptr++; 6106 } else 6107 return NULL; 6108 } 6109 return ptr; 6110 } 6111 6112 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) 6113 { 6114 union trace_eval_map_item *ptr = v; 6115 6116 /* 6117 * Paranoid! If ptr points to end, we don't want to increment past it. 6118 * This really should never happen. 6119 */ 6120 (*pos)++; 6121 ptr = update_eval_map(ptr); 6122 if (WARN_ON_ONCE(!ptr)) 6123 return NULL; 6124 6125 ptr++; 6126 ptr = update_eval_map(ptr); 6127 6128 return ptr; 6129 } 6130 6131 static void *eval_map_start(struct seq_file *m, loff_t *pos) 6132 { 6133 union trace_eval_map_item *v; 6134 loff_t l = 0; 6135 6136 mutex_lock(&trace_eval_mutex); 6137 6138 v = trace_eval_maps; 6139 if (v) 6140 v++; 6141 6142 while (v && l < *pos) { 6143 v = eval_map_next(m, v, &l); 6144 } 6145 6146 return v; 6147 } 6148 6149 static void eval_map_stop(struct seq_file *m, void *v) 6150 { 6151 mutex_unlock(&trace_eval_mutex); 6152 } 6153 6154 static int eval_map_show(struct seq_file *m, void *v) 6155 { 6156 union trace_eval_map_item *ptr = v; 6157 6158 seq_printf(m, "%s %ld (%s)\n", 6159 ptr->map.eval_string, ptr->map.eval_value, 6160 ptr->map.system); 6161 6162 return 0; 6163 } 6164 6165 static const struct seq_operations tracing_eval_map_seq_ops = { 6166 .start = eval_map_start, 6167 .next = eval_map_next, 6168 .stop = eval_map_stop, 6169 .show = eval_map_show, 6170 }; 6171 6172 static int tracing_eval_map_open(struct inode *inode, struct file *filp) 6173 { 6174 int ret; 6175 6176 ret = tracing_check_open_get_tr(NULL); 6177 if (ret) 6178 return ret; 6179 6180 return seq_open(filp, &tracing_eval_map_seq_ops); 6181 } 6182 6183 static const struct file_operations tracing_eval_map_fops = { 6184 .open = tracing_eval_map_open, 6185 .read = seq_read, 6186 .llseek = seq_lseek, 6187 .release = seq_release, 6188 }; 6189 6190 static inline union trace_eval_map_item * 6191 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) 6192 { 6193 /* Return tail of array given the head */ 6194 return ptr + ptr->head.length + 1; 6195 } 6196 6197 static void 6198 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, 6199 int len) 6200 { 6201 struct trace_eval_map **stop; 6202 struct trace_eval_map **map; 6203 union trace_eval_map_item *map_array; 6204 union trace_eval_map_item *ptr; 6205 6206 stop = start + len; 6207 6208 /* 6209 * The trace_eval_maps contains the map plus a head and tail item, 6210 * where the head holds the module and length of array, and the 6211 * tail holds a pointer to the next list. 
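 * A rough sketch of the array built below:
 *
 *   map_array[0]        head:  { .mod = mod, .length = len }
 *   map_array[1..len]   map:   one copy of each trace_eval_map in [start, stop)
 *   map_array[len + 1]  tail:  zeroed; tail.next is filled in later when the
 *                              next module's array gets chained on.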
6212 */ 6213 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); 6214 if (!map_array) { 6215 pr_warn("Unable to allocate trace eval mapping\n"); 6216 return; 6217 } 6218 6219 mutex_lock(&trace_eval_mutex); 6220 6221 if (!trace_eval_maps) 6222 trace_eval_maps = map_array; 6223 else { 6224 ptr = trace_eval_maps; 6225 for (;;) { 6226 ptr = trace_eval_jmp_to_tail(ptr); 6227 if (!ptr->tail.next) 6228 break; 6229 ptr = ptr->tail.next; 6230 6231 } 6232 ptr->tail.next = map_array; 6233 } 6234 map_array->head.mod = mod; 6235 map_array->head.length = len; 6236 map_array++; 6237 6238 for (map = start; (unsigned long)map < (unsigned long)stop; map++) { 6239 map_array->map = **map; 6240 map_array++; 6241 } 6242 memset(map_array, 0, sizeof(*map_array)); 6243 6244 mutex_unlock(&trace_eval_mutex); 6245 } 6246 6247 static void trace_create_eval_file(struct dentry *d_tracer) 6248 { 6249 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, 6250 NULL, &tracing_eval_map_fops); 6251 } 6252 6253 #else /* CONFIG_TRACE_EVAL_MAP_FILE */ 6254 static inline void trace_create_eval_file(struct dentry *d_tracer) { } 6255 static inline void trace_insert_eval_map_file(struct module *mod, 6256 struct trace_eval_map **start, int len) { } 6257 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ 6258 6259 static void trace_insert_eval_map(struct module *mod, 6260 struct trace_eval_map **start, int len) 6261 { 6262 struct trace_eval_map **map; 6263 6264 if (len <= 0) 6265 return; 6266 6267 map = start; 6268 6269 trace_event_eval_update(map, len); 6270 6271 trace_insert_eval_map_file(mod, start, len); 6272 } 6273 6274 static ssize_t 6275 tracing_set_trace_read(struct file *filp, char __user *ubuf, 6276 size_t cnt, loff_t *ppos) 6277 { 6278 struct trace_array *tr = filp->private_data; 6279 char buf[MAX_TRACER_SIZE+2]; 6280 int r; 6281 6282 mutex_lock(&trace_types_lock); 6283 r = sprintf(buf, "%s\n", tr->current_trace->name); 6284 mutex_unlock(&trace_types_lock); 6285 6286 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6287 } 6288 6289 int tracer_init(struct tracer *t, struct trace_array *tr) 6290 { 6291 tracing_reset_online_cpus(&tr->array_buffer); 6292 return t->init(tr); 6293 } 6294 6295 static void set_buffer_entries(struct array_buffer *buf, unsigned long val) 6296 { 6297 int cpu; 6298 6299 for_each_tracing_cpu(cpu) 6300 per_cpu_ptr(buf->data, cpu)->entries = val; 6301 } 6302 6303 static void update_buffer_entries(struct array_buffer *buf, int cpu) 6304 { 6305 if (cpu == RING_BUFFER_ALL_CPUS) { 6306 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); 6307 } else { 6308 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); 6309 } 6310 } 6311 6312 #ifdef CONFIG_TRACER_MAX_TRACE 6313 /* resize @tr's buffer to the size of @size_tr's entries */ 6314 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, 6315 struct array_buffer *size_buf, int cpu_id) 6316 { 6317 int cpu, ret = 0; 6318 6319 if (cpu_id == RING_BUFFER_ALL_CPUS) { 6320 for_each_tracing_cpu(cpu) { 6321 ret = ring_buffer_resize(trace_buf->buffer, 6322 per_cpu_ptr(size_buf->data, cpu)->entries, cpu); 6323 if (ret < 0) 6324 break; 6325 per_cpu_ptr(trace_buf->data, cpu)->entries = 6326 per_cpu_ptr(size_buf->data, cpu)->entries; 6327 } 6328 } else { 6329 ret = ring_buffer_resize(trace_buf->buffer, 6330 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); 6331 if (ret == 0) 6332 per_cpu_ptr(trace_buf->data, cpu_id)->entries = 6333 per_cpu_ptr(size_buf->data, cpu_id)->entries; 6334 } 6335 6336 return ret; 6337 
} 6338 #endif /* CONFIG_TRACER_MAX_TRACE */ 6339 6340 static int __tracing_resize_ring_buffer(struct trace_array *tr, 6341 unsigned long size, int cpu) 6342 { 6343 int ret; 6344 6345 /* 6346 * If kernel or user changes the size of the ring buffer 6347 * we use the size that was given, and we can forget about 6348 * expanding it later. 6349 */ 6350 ring_buffer_expanded = true; 6351 6352 /* May be called before buffers are initialized */ 6353 if (!tr->array_buffer.buffer) 6354 return 0; 6355 6356 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); 6357 if (ret < 0) 6358 return ret; 6359 6360 #ifdef CONFIG_TRACER_MAX_TRACE 6361 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) || 6362 !tr->current_trace->use_max_tr) 6363 goto out; 6364 6365 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); 6366 if (ret < 0) { 6367 int r = resize_buffer_duplicate_size(&tr->array_buffer, 6368 &tr->array_buffer, cpu); 6369 if (r < 0) { 6370 /* 6371 * AARGH! We are left with different 6372 * size max buffer!!!! 6373 * The max buffer is our "snapshot" buffer. 6374 * When a tracer needs a snapshot (one of the 6375 * latency tracers), it swaps the max buffer 6376 * with the saved snap shot. We succeeded to 6377 * update the size of the main buffer, but failed to 6378 * update the size of the max buffer. But when we tried 6379 * to reset the main buffer to the original size, we 6380 * failed there too. This is very unlikely to 6381 * happen, but if it does, warn and kill all 6382 * tracing. 6383 */ 6384 WARN_ON(1); 6385 tracing_disabled = 1; 6386 } 6387 return ret; 6388 } 6389 6390 update_buffer_entries(&tr->max_buffer, cpu); 6391 6392 out: 6393 #endif /* CONFIG_TRACER_MAX_TRACE */ 6394 6395 update_buffer_entries(&tr->array_buffer, cpu); 6396 6397 return ret; 6398 } 6399 6400 ssize_t tracing_resize_ring_buffer(struct trace_array *tr, 6401 unsigned long size, int cpu_id) 6402 { 6403 int ret; 6404 6405 mutex_lock(&trace_types_lock); 6406 6407 if (cpu_id != RING_BUFFER_ALL_CPUS) { 6408 /* make sure, this cpu is enabled in the mask */ 6409 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { 6410 ret = -EINVAL; 6411 goto out; 6412 } 6413 } 6414 6415 ret = __tracing_resize_ring_buffer(tr, size, cpu_id); 6416 if (ret < 0) 6417 ret = -ENOMEM; 6418 6419 out: 6420 mutex_unlock(&trace_types_lock); 6421 6422 return ret; 6423 } 6424 6425 6426 /** 6427 * tracing_update_buffers - used by tracing facility to expand ring buffers 6428 * 6429 * To save on memory when the tracing is never used on a system with it 6430 * configured in. The ring buffers are set to a minimum size. But once 6431 * a user starts to use the tracing facility, then they need to grow 6432 * to their default size. 6433 * 6434 * This function is to be called when a tracer is about to be used. 6435 */ 6436 int tracing_update_buffers(void) 6437 { 6438 int ret = 0; 6439 6440 mutex_lock(&trace_types_lock); 6441 if (!ring_buffer_expanded) 6442 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size, 6443 RING_BUFFER_ALL_CPUS); 6444 mutex_unlock(&trace_types_lock); 6445 6446 return ret; 6447 } 6448 6449 struct trace_option_dentry; 6450 6451 static void 6452 create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 6453 6454 /* 6455 * Used to clear out the tracer before deletion of an instance. 6456 * Must have trace_types_lock held. 
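 * (This mirrors the tear-down half of tracing_set_tracer(): drop the
 * enabled count, call the tracer's reset() callback, then fall back to
 * nop_trace.)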
6457 */ 6458 static void tracing_set_nop(struct trace_array *tr) 6459 { 6460 if (tr->current_trace == &nop_trace) 6461 return; 6462 6463 tr->current_trace->enabled--; 6464 6465 if (tr->current_trace->reset) 6466 tr->current_trace->reset(tr); 6467 6468 tr->current_trace = &nop_trace; 6469 } 6470 6471 static bool tracer_options_updated; 6472 6473 static void add_tracer_options(struct trace_array *tr, struct tracer *t) 6474 { 6475 /* Only enable if the directory has been created already. */ 6476 if (!tr->dir) 6477 return; 6478 6479 /* Only create trace option files after update_tracer_options finish */ 6480 if (!tracer_options_updated) 6481 return; 6482 6483 create_trace_option_files(tr, t); 6484 } 6485 6486 int tracing_set_tracer(struct trace_array *tr, const char *buf) 6487 { 6488 struct tracer *t; 6489 #ifdef CONFIG_TRACER_MAX_TRACE 6490 bool had_max_tr; 6491 #endif 6492 int ret = 0; 6493 6494 mutex_lock(&trace_types_lock); 6495 6496 if (!ring_buffer_expanded) { 6497 ret = __tracing_resize_ring_buffer(tr, trace_buf_size, 6498 RING_BUFFER_ALL_CPUS); 6499 if (ret < 0) 6500 goto out; 6501 ret = 0; 6502 } 6503 6504 for (t = trace_types; t; t = t->next) { 6505 if (strcmp(t->name, buf) == 0) 6506 break; 6507 } 6508 if (!t) { 6509 ret = -EINVAL; 6510 goto out; 6511 } 6512 if (t == tr->current_trace) 6513 goto out; 6514 6515 #ifdef CONFIG_TRACER_SNAPSHOT 6516 if (t->use_max_tr) { 6517 local_irq_disable(); 6518 arch_spin_lock(&tr->max_lock); 6519 if (tr->cond_snapshot) 6520 ret = -EBUSY; 6521 arch_spin_unlock(&tr->max_lock); 6522 local_irq_enable(); 6523 if (ret) 6524 goto out; 6525 } 6526 #endif 6527 /* Some tracers won't work on kernel command line */ 6528 if (system_state < SYSTEM_RUNNING && t->noboot) { 6529 pr_warn("Tracer '%s' is not allowed on command line, ignored\n", 6530 t->name); 6531 goto out; 6532 } 6533 6534 /* Some tracers are only allowed for the top level buffer */ 6535 if (!trace_ok_for_array(t, tr)) { 6536 ret = -EINVAL; 6537 goto out; 6538 } 6539 6540 /* If trace pipe files are being read, we can't change the tracer */ 6541 if (tr->trace_ref) { 6542 ret = -EBUSY; 6543 goto out; 6544 } 6545 6546 trace_branch_disable(); 6547 6548 tr->current_trace->enabled--; 6549 6550 if (tr->current_trace->reset) 6551 tr->current_trace->reset(tr); 6552 6553 #ifdef CONFIG_TRACER_MAX_TRACE 6554 had_max_tr = tr->current_trace->use_max_tr; 6555 6556 /* Current trace needs to be nop_trace before synchronize_rcu */ 6557 tr->current_trace = &nop_trace; 6558 6559 if (had_max_tr && !t->use_max_tr) { 6560 /* 6561 * We need to make sure that the update_max_tr sees that 6562 * current_trace changed to nop_trace to keep it from 6563 * swapping the buffers after we resize it. 6564 * The update_max_tr is called from interrupts disabled 6565 * so a synchronized_sched() is sufficient. 
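 * (Since the RCU flavors were consolidated, synchronize_rcu() below also
 * waits for preempt/irq-disabled regions, so it covers what the older
 * synchronize_sched() used to provide here.)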
6566 */ 6567 synchronize_rcu(); 6568 free_snapshot(tr); 6569 } 6570 6571 if (t->use_max_tr && !tr->allocated_snapshot) { 6572 ret = tracing_alloc_snapshot_instance(tr); 6573 if (ret < 0) 6574 goto out; 6575 } 6576 #else 6577 tr->current_trace = &nop_trace; 6578 #endif 6579 6580 if (t->init) { 6581 ret = tracer_init(t, tr); 6582 if (ret) 6583 goto out; 6584 } 6585 6586 tr->current_trace = t; 6587 tr->current_trace->enabled++; 6588 trace_branch_enable(tr); 6589 out: 6590 mutex_unlock(&trace_types_lock); 6591 6592 return ret; 6593 } 6594 6595 static ssize_t 6596 tracing_set_trace_write(struct file *filp, const char __user *ubuf, 6597 size_t cnt, loff_t *ppos) 6598 { 6599 struct trace_array *tr = filp->private_data; 6600 char buf[MAX_TRACER_SIZE+1]; 6601 char *name; 6602 size_t ret; 6603 int err; 6604 6605 ret = cnt; 6606 6607 if (cnt > MAX_TRACER_SIZE) 6608 cnt = MAX_TRACER_SIZE; 6609 6610 if (copy_from_user(buf, ubuf, cnt)) 6611 return -EFAULT; 6612 6613 buf[cnt] = 0; 6614 6615 name = strim(buf); 6616 6617 err = tracing_set_tracer(tr, name); 6618 if (err) 6619 return err; 6620 6621 *ppos += ret; 6622 6623 return ret; 6624 } 6625 6626 static ssize_t 6627 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, 6628 size_t cnt, loff_t *ppos) 6629 { 6630 char buf[64]; 6631 int r; 6632 6633 r = snprintf(buf, sizeof(buf), "%ld\n", 6634 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr)); 6635 if (r > sizeof(buf)) 6636 r = sizeof(buf); 6637 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 6638 } 6639 6640 static ssize_t 6641 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, 6642 size_t cnt, loff_t *ppos) 6643 { 6644 unsigned long val; 6645 int ret; 6646 6647 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6648 if (ret) 6649 return ret; 6650 6651 *ptr = val * 1000; 6652 6653 return cnt; 6654 } 6655 6656 static ssize_t 6657 tracing_thresh_read(struct file *filp, char __user *ubuf, 6658 size_t cnt, loff_t *ppos) 6659 { 6660 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); 6661 } 6662 6663 static ssize_t 6664 tracing_thresh_write(struct file *filp, const char __user *ubuf, 6665 size_t cnt, loff_t *ppos) 6666 { 6667 struct trace_array *tr = filp->private_data; 6668 int ret; 6669 6670 mutex_lock(&trace_types_lock); 6671 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); 6672 if (ret < 0) 6673 goto out; 6674 6675 if (tr->current_trace->update_thresh) { 6676 ret = tr->current_trace->update_thresh(tr); 6677 if (ret < 0) 6678 goto out; 6679 } 6680 6681 ret = cnt; 6682 out: 6683 mutex_unlock(&trace_types_lock); 6684 6685 return ret; 6686 } 6687 6688 #ifdef CONFIG_TRACER_MAX_TRACE 6689 6690 static ssize_t 6691 tracing_max_lat_read(struct file *filp, char __user *ubuf, 6692 size_t cnt, loff_t *ppos) 6693 { 6694 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos); 6695 } 6696 6697 static ssize_t 6698 tracing_max_lat_write(struct file *filp, const char __user *ubuf, 6699 size_t cnt, loff_t *ppos) 6700 { 6701 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos); 6702 } 6703 6704 #endif 6705 6706 static int open_pipe_on_cpu(struct trace_array *tr, int cpu) 6707 { 6708 if (cpu == RING_BUFFER_ALL_CPUS) { 6709 if (cpumask_empty(tr->pipe_cpumask)) { 6710 cpumask_setall(tr->pipe_cpumask); 6711 return 0; 6712 } 6713 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { 6714 cpumask_set_cpu(cpu, tr->pipe_cpumask); 6715 return 0; 6716 } 6717 return -EBUSY; 6718 } 6719 6720 static void close_pipe_on_cpu(struct trace_array *tr, int cpu) 6721 { 6722 if 
(cpu == RING_BUFFER_ALL_CPUS) { 6723 WARN_ON(!cpumask_full(tr->pipe_cpumask)); 6724 cpumask_clear(tr->pipe_cpumask); 6725 } else { 6726 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); 6727 cpumask_clear_cpu(cpu, tr->pipe_cpumask); 6728 } 6729 } 6730 6731 static int tracing_open_pipe(struct inode *inode, struct file *filp) 6732 { 6733 struct trace_array *tr = inode->i_private; 6734 struct trace_iterator *iter; 6735 int cpu; 6736 int ret; 6737 6738 ret = tracing_check_open_get_tr(tr); 6739 if (ret) 6740 return ret; 6741 6742 mutex_lock(&trace_types_lock); 6743 cpu = tracing_get_cpu(inode); 6744 ret = open_pipe_on_cpu(tr, cpu); 6745 if (ret) 6746 goto fail_pipe_on_cpu; 6747 6748 /* create a buffer to store the information to pass to userspace */ 6749 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 6750 if (!iter) { 6751 ret = -ENOMEM; 6752 goto fail_alloc_iter; 6753 } 6754 6755 trace_seq_init(&iter->seq); 6756 iter->trace = tr->current_trace; 6757 6758 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 6759 ret = -ENOMEM; 6760 goto fail; 6761 } 6762 6763 /* trace pipe does not show start of buffer */ 6764 cpumask_setall(iter->started); 6765 6766 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 6767 iter->iter_flags |= TRACE_FILE_LAT_FMT; 6768 6769 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6770 if (trace_clocks[tr->clock_id].in_ns) 6771 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6772 6773 iter->tr = tr; 6774 iter->array_buffer = &tr->array_buffer; 6775 iter->cpu_file = cpu; 6776 mutex_init(&iter->mutex); 6777 filp->private_data = iter; 6778 6779 if (iter->trace->pipe_open) 6780 iter->trace->pipe_open(iter); 6781 6782 nonseekable_open(inode, filp); 6783 6784 tr->trace_ref++; 6785 6786 mutex_unlock(&trace_types_lock); 6787 return ret; 6788 6789 fail: 6790 kfree(iter); 6791 fail_alloc_iter: 6792 close_pipe_on_cpu(tr, cpu); 6793 fail_pipe_on_cpu: 6794 __trace_array_put(tr); 6795 mutex_unlock(&trace_types_lock); 6796 return ret; 6797 } 6798 6799 static int tracing_release_pipe(struct inode *inode, struct file *file) 6800 { 6801 struct trace_iterator *iter = file->private_data; 6802 struct trace_array *tr = inode->i_private; 6803 6804 mutex_lock(&trace_types_lock); 6805 6806 tr->trace_ref--; 6807 6808 if (iter->trace->pipe_close) 6809 iter->trace->pipe_close(iter); 6810 close_pipe_on_cpu(tr, iter->cpu_file); 6811 mutex_unlock(&trace_types_lock); 6812 6813 free_trace_iter_content(iter); 6814 kfree(iter); 6815 6816 trace_array_put(tr); 6817 6818 return 0; 6819 } 6820 6821 static __poll_t 6822 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 6823 { 6824 struct trace_array *tr = iter->tr; 6825 6826 /* Iterators are static, they should be filled or empty */ 6827 if (trace_buffer_iter(iter, iter->cpu_file)) 6828 return EPOLLIN | EPOLLRDNORM; 6829 6830 if (tr->trace_flags & TRACE_ITER_BLOCK) 6831 /* 6832 * Always select as readable when in blocking mode 6833 */ 6834 return EPOLLIN | EPOLLRDNORM; 6835 else 6836 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, 6837 filp, poll_table, iter->tr->buffer_percent); 6838 } 6839 6840 static __poll_t 6841 tracing_poll_pipe(struct file *filp, poll_table *poll_table) 6842 { 6843 struct trace_iterator *iter = filp->private_data; 6844 6845 return trace_poll(iter, filp, poll_table); 6846 } 6847 6848 /* Must be called with iter->mutex held. 
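 * The mutex is dropped around wait_on_pipe() below and re-acquired before
 * returning, so it is held again when this function exits.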
*/ 6849 static int tracing_wait_pipe(struct file *filp) 6850 { 6851 struct trace_iterator *iter = filp->private_data; 6852 int ret; 6853 6854 while (trace_empty(iter)) { 6855 6856 if ((filp->f_flags & O_NONBLOCK)) { 6857 return -EAGAIN; 6858 } 6859 6860 /* 6861 * We block until we read something and tracing is disabled. 6862 * We still block if tracing is disabled, but we have never 6863 * read anything. This allows a user to cat this file, and 6864 * then enable tracing. But after we have read something, 6865 * we give an EOF when tracing is again disabled. 6866 * 6867 * iter->pos will be 0 if we haven't read anything. 6868 */ 6869 if (!tracer_tracing_is_on(iter->tr) && iter->pos) 6870 break; 6871 6872 mutex_unlock(&iter->mutex); 6873 6874 ret = wait_on_pipe(iter, 0); 6875 6876 mutex_lock(&iter->mutex); 6877 6878 if (ret) 6879 return ret; 6880 } 6881 6882 return 1; 6883 } 6884 6885 /* 6886 * Consumer reader. 6887 */ 6888 static ssize_t 6889 tracing_read_pipe(struct file *filp, char __user *ubuf, 6890 size_t cnt, loff_t *ppos) 6891 { 6892 struct trace_iterator *iter = filp->private_data; 6893 ssize_t sret; 6894 6895 /* 6896 * Avoid more than one consumer on a single file descriptor 6897 * This is just a matter of traces coherency, the ring buffer itself 6898 * is protected. 6899 */ 6900 mutex_lock(&iter->mutex); 6901 6902 /* return any leftover data */ 6903 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6904 if (sret != -EBUSY) 6905 goto out; 6906 6907 trace_seq_init(&iter->seq); 6908 6909 if (iter->trace->read) { 6910 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 6911 if (sret) 6912 goto out; 6913 } 6914 6915 waitagain: 6916 sret = tracing_wait_pipe(filp); 6917 if (sret <= 0) 6918 goto out; 6919 6920 /* stop when tracing is finished */ 6921 if (trace_empty(iter)) { 6922 sret = 0; 6923 goto out; 6924 } 6925 6926 if (cnt >= PAGE_SIZE) 6927 cnt = PAGE_SIZE - 1; 6928 6929 /* reset all but tr, trace, and overruns */ 6930 trace_iterator_reset(iter); 6931 cpumask_clear(iter->started); 6932 trace_seq_init(&iter->seq); 6933 6934 trace_event_read_lock(); 6935 trace_access_lock(iter->cpu_file); 6936 while (trace_find_next_entry_inc(iter) != NULL) { 6937 enum print_line_t ret; 6938 int save_len = iter->seq.seq.len; 6939 6940 ret = print_trace_line(iter); 6941 if (ret == TRACE_TYPE_PARTIAL_LINE) { 6942 /* 6943 * If one print_trace_line() fills entire trace_seq in one shot, 6944 * trace_seq_to_user() will returns -EBUSY because save_len == 0, 6945 * In this case, we need to consume it, otherwise, loop will peek 6946 * this event next time, resulting in an infinite loop. 6947 */ 6948 if (save_len == 0) { 6949 iter->seq.full = 0; 6950 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); 6951 trace_consume(iter); 6952 break; 6953 } 6954 6955 /* In other cases, don't print partial lines */ 6956 iter->seq.seq.len = save_len; 6957 break; 6958 } 6959 if (ret != TRACE_TYPE_NO_CONSUME) 6960 trace_consume(iter); 6961 6962 if (trace_seq_used(&iter->seq) >= cnt) 6963 break; 6964 6965 /* 6966 * Setting the full flag means we reached the trace_seq buffer 6967 * size and we should leave by partial output condition above. 6968 * One of the trace_seq_* functions is not used properly. 
6969 */ 6970 WARN_ONCE(iter->seq.full, "full flag set for trace type %d", 6971 iter->ent->type); 6972 } 6973 trace_access_unlock(iter->cpu_file); 6974 trace_event_read_unlock(); 6975 6976 /* Now copy what we have to the user */ 6977 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 6978 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) 6979 trace_seq_init(&iter->seq); 6980 6981 /* 6982 * If there was nothing to send to user, in spite of consuming trace 6983 * entries, go back to wait for more entries. 6984 */ 6985 if (sret == -EBUSY) 6986 goto waitagain; 6987 6988 out: 6989 mutex_unlock(&iter->mutex); 6990 6991 return sret; 6992 } 6993 6994 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, 6995 unsigned int idx) 6996 { 6997 __free_page(spd->pages[idx]); 6998 } 6999 7000 static size_t 7001 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) 7002 { 7003 size_t count; 7004 int save_len; 7005 int ret; 7006 7007 /* Seq buffer is page-sized, exactly what we need. */ 7008 for (;;) { 7009 save_len = iter->seq.seq.len; 7010 ret = print_trace_line(iter); 7011 7012 if (trace_seq_has_overflowed(&iter->seq)) { 7013 iter->seq.seq.len = save_len; 7014 break; 7015 } 7016 7017 /* 7018 * This should not be hit, because it should only 7019 * be set if the iter->seq overflowed. But check it 7020 * anyway to be safe. 7021 */ 7022 if (ret == TRACE_TYPE_PARTIAL_LINE) { 7023 iter->seq.seq.len = save_len; 7024 break; 7025 } 7026 7027 count = trace_seq_used(&iter->seq) - save_len; 7028 if (rem < count) { 7029 rem = 0; 7030 iter->seq.seq.len = save_len; 7031 break; 7032 } 7033 7034 if (ret != TRACE_TYPE_NO_CONSUME) 7035 trace_consume(iter); 7036 rem -= count; 7037 if (!trace_find_next_entry_inc(iter)) { 7038 rem = 0; 7039 iter->ent = NULL; 7040 break; 7041 } 7042 } 7043 7044 return rem; 7045 } 7046 7047 static ssize_t tracing_splice_read_pipe(struct file *filp, 7048 loff_t *ppos, 7049 struct pipe_inode_info *pipe, 7050 size_t len, 7051 unsigned int flags) 7052 { 7053 struct page *pages_def[PIPE_DEF_BUFFERS]; 7054 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 7055 struct trace_iterator *iter = filp->private_data; 7056 struct splice_pipe_desc spd = { 7057 .pages = pages_def, 7058 .partial = partial_def, 7059 .nr_pages = 0, /* This gets updated below. */ 7060 .nr_pages_max = PIPE_DEF_BUFFERS, 7061 .ops = &default_pipe_buf_ops, 7062 .spd_release = tracing_spd_release_pipe, 7063 }; 7064 ssize_t ret; 7065 size_t rem; 7066 unsigned int i; 7067 7068 if (splice_grow_spd(pipe, &spd)) 7069 return -ENOMEM; 7070 7071 mutex_lock(&iter->mutex); 7072 7073 if (iter->trace->splice_read) { 7074 ret = iter->trace->splice_read(iter, filp, 7075 ppos, pipe, len, flags); 7076 if (ret) 7077 goto out_err; 7078 } 7079 7080 ret = tracing_wait_pipe(filp); 7081 if (ret <= 0) 7082 goto out_err; 7083 7084 if (!iter->ent && !trace_find_next_entry_inc(iter)) { 7085 ret = -EFAULT; 7086 goto out_err; 7087 } 7088 7089 trace_event_read_lock(); 7090 trace_access_lock(iter->cpu_file); 7091 7092 /* Fill as many pages as possible. */ 7093 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { 7094 spd.pages[i] = alloc_page(GFP_KERNEL); 7095 if (!spd.pages[i]) 7096 break; 7097 7098 rem = tracing_fill_pipe_page(rem, iter); 7099 7100 /* Copy the data into the page, so we can start over. 
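 * trace_seq_to_buffer() copies what tracing_fill_pipe_page() formatted into
 * the freshly allocated page; the seq is then re-initialized so the next
 * iteration starts from an empty, page-sized buffer.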
*/ 7101 ret = trace_seq_to_buffer(&iter->seq, 7102 page_address(spd.pages[i]), 7103 trace_seq_used(&iter->seq)); 7104 if (ret < 0) { 7105 __free_page(spd.pages[i]); 7106 break; 7107 } 7108 spd.partial[i].offset = 0; 7109 spd.partial[i].len = trace_seq_used(&iter->seq); 7110 7111 trace_seq_init(&iter->seq); 7112 } 7113 7114 trace_access_unlock(iter->cpu_file); 7115 trace_event_read_unlock(); 7116 mutex_unlock(&iter->mutex); 7117 7118 spd.nr_pages = i; 7119 7120 if (i) 7121 ret = splice_to_pipe(pipe, &spd); 7122 else 7123 ret = 0; 7124 out: 7125 splice_shrink_spd(&spd); 7126 return ret; 7127 7128 out_err: 7129 mutex_unlock(&iter->mutex); 7130 goto out; 7131 } 7132 7133 static ssize_t 7134 tracing_entries_read(struct file *filp, char __user *ubuf, 7135 size_t cnt, loff_t *ppos) 7136 { 7137 struct inode *inode = file_inode(filp); 7138 struct trace_array *tr = inode->i_private; 7139 int cpu = tracing_get_cpu(inode); 7140 char buf[64]; 7141 int r = 0; 7142 ssize_t ret; 7143 7144 mutex_lock(&trace_types_lock); 7145 7146 if (cpu == RING_BUFFER_ALL_CPUS) { 7147 int cpu, buf_size_same; 7148 unsigned long size; 7149 7150 size = 0; 7151 buf_size_same = 1; 7152 /* check if all cpu sizes are same */ 7153 for_each_tracing_cpu(cpu) { 7154 /* fill in the size from first enabled cpu */ 7155 if (size == 0) 7156 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; 7157 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { 7158 buf_size_same = 0; 7159 break; 7160 } 7161 } 7162 7163 if (buf_size_same) { 7164 if (!ring_buffer_expanded) 7165 r = sprintf(buf, "%lu (expanded: %lu)\n", 7166 size >> 10, 7167 trace_buf_size >> 10); 7168 else 7169 r = sprintf(buf, "%lu\n", size >> 10); 7170 } else 7171 r = sprintf(buf, "X\n"); 7172 } else 7173 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); 7174 7175 mutex_unlock(&trace_types_lock); 7176 7177 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 7178 return ret; 7179 } 7180 7181 static ssize_t 7182 tracing_entries_write(struct file *filp, const char __user *ubuf, 7183 size_t cnt, loff_t *ppos) 7184 { 7185 struct inode *inode = file_inode(filp); 7186 struct trace_array *tr = inode->i_private; 7187 unsigned long val; 7188 int ret; 7189 7190 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7191 if (ret) 7192 return ret; 7193 7194 /* must have at least 1 entry */ 7195 if (!val) 7196 return -EINVAL; 7197 7198 /* value is in KB */ 7199 val <<= 10; 7200 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); 7201 if (ret < 0) 7202 return ret; 7203 7204 *ppos += cnt; 7205 7206 return cnt; 7207 } 7208 7209 static ssize_t 7210 tracing_total_entries_read(struct file *filp, char __user *ubuf, 7211 size_t cnt, loff_t *ppos) 7212 { 7213 struct trace_array *tr = filp->private_data; 7214 char buf[64]; 7215 int r, cpu; 7216 unsigned long size = 0, expanded_size = 0; 7217 7218 mutex_lock(&trace_types_lock); 7219 for_each_tracing_cpu(cpu) { 7220 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; 7221 if (!ring_buffer_expanded) 7222 expanded_size += trace_buf_size >> 10; 7223 } 7224 if (ring_buffer_expanded) 7225 r = sprintf(buf, "%lu\n", size); 7226 else 7227 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); 7228 mutex_unlock(&trace_types_lock); 7229 7230 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 7231 } 7232 7233 static ssize_t 7234 tracing_free_buffer_write(struct file *filp, const char __user *ubuf, 7235 size_t cnt, loff_t *ppos) 7236 { 7237 /* 7238 * There is no need to read what the 
user has written, this function 7239 * is just to make sure that there is no error when "echo" is used 7240 */ 7241 7242 *ppos += cnt; 7243 7244 return cnt; 7245 } 7246 7247 static int 7248 tracing_free_buffer_release(struct inode *inode, struct file *filp) 7249 { 7250 struct trace_array *tr = inode->i_private; 7251 7252 /* disable tracing ? */ 7253 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 7254 tracer_tracing_off(tr); 7255 /* resize the ring buffer to 0 */ 7256 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); 7257 7258 trace_array_put(tr); 7259 7260 return 0; 7261 } 7262 7263 static ssize_t 7264 tracing_mark_write(struct file *filp, const char __user *ubuf, 7265 size_t cnt, loff_t *fpos) 7266 { 7267 struct trace_array *tr = filp->private_data; 7268 struct ring_buffer_event *event; 7269 enum event_trigger_type tt = ETT_NONE; 7270 struct trace_buffer *buffer; 7271 struct print_entry *entry; 7272 ssize_t written; 7273 int size; 7274 int len; 7275 7276 /* Used in tracing_mark_raw_write() as well */ 7277 #define FAULTED_STR "<faulted>" 7278 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 7279 7280 if (tracing_disabled) 7281 return -EINVAL; 7282 7283 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7284 return -EINVAL; 7285 7286 if (cnt > TRACE_BUF_SIZE) 7287 cnt = TRACE_BUF_SIZE; 7288 7289 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 7290 7291 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */ 7292 7293 /* If less than "<faulted>", then make sure we can still add that */ 7294 if (cnt < FAULTED_SIZE) 7295 size += FAULTED_SIZE - cnt; 7296 7297 buffer = tr->array_buffer.buffer; 7298 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, 7299 tracing_gen_ctx()); 7300 if (unlikely(!event)) 7301 /* Ring buffer disabled, return as if not open for write */ 7302 return -EBADF; 7303 7304 entry = ring_buffer_event_data(event); 7305 entry->ip = _THIS_IP_; 7306 7307 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 7308 if (len) { 7309 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7310 cnt = FAULTED_SIZE; 7311 written = -EFAULT; 7312 } else 7313 written = cnt; 7314 7315 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { 7316 /* do not add \n before testing triggers, but add \0 */ 7317 entry->buf[cnt] = '\0'; 7318 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); 7319 } 7320 7321 if (entry->buf[cnt - 1] != '\n') { 7322 entry->buf[cnt] = '\n'; 7323 entry->buf[cnt + 1] = '\0'; 7324 } else 7325 entry->buf[cnt] = '\0'; 7326 7327 if (static_branch_unlikely(&trace_marker_exports_enabled)) 7328 ftrace_exports(event, TRACE_EXPORT_MARKER); 7329 __buffer_unlock_commit(buffer, event); 7330 7331 if (tt) 7332 event_triggers_post_call(tr->trace_marker_file, tt); 7333 7334 return written; 7335 } 7336 7337 /* Limit it for now to 3K (including tag) */ 7338 #define RAW_DATA_MAX_SIZE (1024*3) 7339 7340 static ssize_t 7341 tracing_mark_raw_write(struct file *filp, const char __user *ubuf, 7342 size_t cnt, loff_t *fpos) 7343 { 7344 struct trace_array *tr = filp->private_data; 7345 struct ring_buffer_event *event; 7346 struct trace_buffer *buffer; 7347 struct raw_data_entry *entry; 7348 ssize_t written; 7349 int size; 7350 int len; 7351 7352 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) 7353 7354 if (tracing_disabled) 7355 return -EINVAL; 7356 7357 if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 7358 return -EINVAL; 7359 7360 /* The marker must at least have a tag id */ 7361 if (cnt < sizeof(unsigned int) 
|| cnt > RAW_DATA_MAX_SIZE) 7362 return -EINVAL; 7363 7364 if (cnt > TRACE_BUF_SIZE) 7365 cnt = TRACE_BUF_SIZE; 7366 7367 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); 7368 7369 size = sizeof(*entry) + cnt; 7370 if (cnt < FAULT_SIZE_ID) 7371 size += FAULT_SIZE_ID - cnt; 7372 7373 buffer = tr->array_buffer.buffer; 7374 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, 7375 tracing_gen_ctx()); 7376 if (!event) 7377 /* Ring buffer disabled, return as if not open for write */ 7378 return -EBADF; 7379 7380 entry = ring_buffer_event_data(event); 7381 7382 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 7383 if (len) { 7384 entry->id = -1; 7385 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 7386 written = -EFAULT; 7387 } else 7388 written = cnt; 7389 7390 __buffer_unlock_commit(buffer, event); 7391 7392 return written; 7393 } 7394 7395 static int tracing_clock_show(struct seq_file *m, void *v) 7396 { 7397 struct trace_array *tr = m->private; 7398 int i; 7399 7400 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) 7401 seq_printf(m, 7402 "%s%s%s%s", i ? " " : "", 7403 i == tr->clock_id ? "[" : "", trace_clocks[i].name, 7404 i == tr->clock_id ? "]" : ""); 7405 seq_putc(m, '\n'); 7406 7407 return 0; 7408 } 7409 7410 int tracing_set_clock(struct trace_array *tr, const char *clockstr) 7411 { 7412 int i; 7413 7414 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { 7415 if (strcmp(trace_clocks[i].name, clockstr) == 0) 7416 break; 7417 } 7418 if (i == ARRAY_SIZE(trace_clocks)) 7419 return -EINVAL; 7420 7421 mutex_lock(&trace_types_lock); 7422 7423 tr->clock_id = i; 7424 7425 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); 7426 7427 /* 7428 * New clock may not be consistent with the previous clock. 7429 * Reset the buffer so that it doesn't have incomparable timestamps. 
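 * An illustrative round trip from user space (paths per the tracefs docs):
 *
 *   # cat /sys/kernel/tracing/trace_clock
 *   [local] global counter ...
 *   # echo mono > /sys/kernel/tracing/trace_clock
 *
 * The write reaches tracing_set_clock() via tracing_clock_write() below.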
7430 */ 7431 tracing_reset_online_cpus(&tr->array_buffer); 7432 7433 #ifdef CONFIG_TRACER_MAX_TRACE 7434 if (tr->max_buffer.buffer) 7435 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); 7436 tracing_reset_online_cpus(&tr->max_buffer); 7437 #endif 7438 7439 mutex_unlock(&trace_types_lock); 7440 7441 return 0; 7442 } 7443 7444 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, 7445 size_t cnt, loff_t *fpos) 7446 { 7447 struct seq_file *m = filp->private_data; 7448 struct trace_array *tr = m->private; 7449 char buf[64]; 7450 const char *clockstr; 7451 int ret; 7452 7453 if (cnt >= sizeof(buf)) 7454 return -EINVAL; 7455 7456 if (copy_from_user(buf, ubuf, cnt)) 7457 return -EFAULT; 7458 7459 buf[cnt] = 0; 7460 7461 clockstr = strstrip(buf); 7462 7463 ret = tracing_set_clock(tr, clockstr); 7464 if (ret) 7465 return ret; 7466 7467 *fpos += cnt; 7468 7469 return cnt; 7470 } 7471 7472 static int tracing_clock_open(struct inode *inode, struct file *file) 7473 { 7474 struct trace_array *tr = inode->i_private; 7475 int ret; 7476 7477 ret = tracing_check_open_get_tr(tr); 7478 if (ret) 7479 return ret; 7480 7481 ret = single_open(file, tracing_clock_show, inode->i_private); 7482 if (ret < 0) 7483 trace_array_put(tr); 7484 7485 return ret; 7486 } 7487 7488 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) 7489 { 7490 struct trace_array *tr = m->private; 7491 7492 mutex_lock(&trace_types_lock); 7493 7494 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) 7495 seq_puts(m, "delta [absolute]\n"); 7496 else 7497 seq_puts(m, "[delta] absolute\n"); 7498 7499 mutex_unlock(&trace_types_lock); 7500 7501 return 0; 7502 } 7503 7504 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) 7505 { 7506 struct trace_array *tr = inode->i_private; 7507 int ret; 7508 7509 ret = tracing_check_open_get_tr(tr); 7510 if (ret) 7511 return ret; 7512 7513 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); 7514 if (ret < 0) 7515 trace_array_put(tr); 7516 7517 return ret; 7518 } 7519 7520 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) 7521 { 7522 if (rbe == this_cpu_read(trace_buffered_event)) 7523 return ring_buffer_time_stamp(buffer); 7524 7525 return ring_buffer_event_time_stamp(buffer, rbe); 7526 } 7527 7528 /* 7529 * Set or disable using the per CPU trace_buffer_event when possible. 
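 * no_filter_buffering_ref acts as a nesting count: every set==true call
 * takes a reference and every set==false call drops one, so independent
 * users can enable and disable this without stepping on each other.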
7530 */ 7531 int tracing_set_filter_buffering(struct trace_array *tr, bool set) 7532 { 7533 int ret = 0; 7534 7535 mutex_lock(&trace_types_lock); 7536 7537 if (set && tr->no_filter_buffering_ref++) 7538 goto out; 7539 7540 if (!set) { 7541 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) { 7542 ret = -EINVAL; 7543 goto out; 7544 } 7545 7546 --tr->no_filter_buffering_ref; 7547 } 7548 out: 7549 mutex_unlock(&trace_types_lock); 7550 7551 return ret; 7552 } 7553 7554 struct ftrace_buffer_info { 7555 struct trace_iterator iter; 7556 void *spare; 7557 unsigned int spare_cpu; 7558 unsigned int read; 7559 }; 7560 7561 #ifdef CONFIG_TRACER_SNAPSHOT 7562 static int tracing_snapshot_open(struct inode *inode, struct file *file) 7563 { 7564 struct trace_array *tr = inode->i_private; 7565 struct trace_iterator *iter; 7566 struct seq_file *m; 7567 int ret; 7568 7569 ret = tracing_check_open_get_tr(tr); 7570 if (ret) 7571 return ret; 7572 7573 if (file->f_mode & FMODE_READ) { 7574 iter = __tracing_open(inode, file, true); 7575 if (IS_ERR(iter)) 7576 ret = PTR_ERR(iter); 7577 } else { 7578 /* Writes still need the seq_file to hold the private data */ 7579 ret = -ENOMEM; 7580 m = kzalloc(sizeof(*m), GFP_KERNEL); 7581 if (!m) 7582 goto out; 7583 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 7584 if (!iter) { 7585 kfree(m); 7586 goto out; 7587 } 7588 ret = 0; 7589 7590 iter->tr = tr; 7591 iter->array_buffer = &tr->max_buffer; 7592 iter->cpu_file = tracing_get_cpu(inode); 7593 m->private = iter; 7594 file->private_data = m; 7595 } 7596 out: 7597 if (ret < 0) 7598 trace_array_put(tr); 7599 7600 return ret; 7601 } 7602 7603 static void tracing_swap_cpu_buffer(void *tr) 7604 { 7605 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); 7606 } 7607 7608 static ssize_t 7609 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 7610 loff_t *ppos) 7611 { 7612 struct seq_file *m = filp->private_data; 7613 struct trace_iterator *iter = m->private; 7614 struct trace_array *tr = iter->tr; 7615 unsigned long val; 7616 int ret; 7617 7618 ret = tracing_update_buffers(); 7619 if (ret < 0) 7620 return ret; 7621 7622 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 7623 if (ret) 7624 return ret; 7625 7626 mutex_lock(&trace_types_lock); 7627 7628 if (tr->current_trace->use_max_tr) { 7629 ret = -EBUSY; 7630 goto out; 7631 } 7632 7633 local_irq_disable(); 7634 arch_spin_lock(&tr->max_lock); 7635 if (tr->cond_snapshot) 7636 ret = -EBUSY; 7637 arch_spin_unlock(&tr->max_lock); 7638 local_irq_enable(); 7639 if (ret) 7640 goto out; 7641 7642 switch (val) { 7643 case 0: 7644 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 7645 ret = -EINVAL; 7646 break; 7647 } 7648 if (tr->allocated_snapshot) 7649 free_snapshot(tr); 7650 break; 7651 case 1: 7652 /* Only allow per-cpu swap if the ring buffer supports it */ 7653 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP 7654 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { 7655 ret = -EINVAL; 7656 break; 7657 } 7658 #endif 7659 if (tr->allocated_snapshot) 7660 ret = resize_buffer_duplicate_size(&tr->max_buffer, 7661 &tr->array_buffer, iter->cpu_file); 7662 else 7663 ret = tracing_alloc_snapshot_instance(tr); 7664 if (ret < 0) 7665 break; 7666 /* Now, we're going to swap */ 7667 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { 7668 local_irq_disable(); 7669 update_max_tr(tr, current, smp_processor_id(), NULL); 7670 local_irq_enable(); 7671 } else { 7672 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, 7673 (void *)tr, 1); 7674 } 7675 break; 7676 default: 7677 
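		/*
		 * Any other value clears the snapshot contents, matching the
		 * documented usage: 0 frees the snapshot buffer, 1 takes a
		 * snapshot, 2 (or anything else) clears it.
		 */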
if (tr->allocated_snapshot) { 7678 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) 7679 tracing_reset_online_cpus(&tr->max_buffer); 7680 else 7681 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); 7682 } 7683 break; 7684 } 7685 7686 if (ret >= 0) { 7687 *ppos += cnt; 7688 ret = cnt; 7689 } 7690 out: 7691 mutex_unlock(&trace_types_lock); 7692 return ret; 7693 } 7694 7695 static int tracing_snapshot_release(struct inode *inode, struct file *file) 7696 { 7697 struct seq_file *m = file->private_data; 7698 int ret; 7699 7700 ret = tracing_release(inode, file); 7701 7702 if (file->f_mode & FMODE_READ) 7703 return ret; 7704 7705 /* If write only, the seq_file is just a stub */ 7706 if (m) 7707 kfree(m->private); 7708 kfree(m); 7709 7710 return 0; 7711 } 7712 7713 static int tracing_buffers_open(struct inode *inode, struct file *filp); 7714 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, 7715 size_t count, loff_t *ppos); 7716 static int tracing_buffers_release(struct inode *inode, struct file *file); 7717 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, 7718 struct pipe_inode_info *pipe, size_t len, unsigned int flags); 7719 7720 static int snapshot_raw_open(struct inode *inode, struct file *filp) 7721 { 7722 struct ftrace_buffer_info *info; 7723 int ret; 7724 7725 /* The following checks for tracefs lockdown */ 7726 ret = tracing_buffers_open(inode, filp); 7727 if (ret < 0) 7728 return ret; 7729 7730 info = filp->private_data; 7731 7732 if (info->iter.trace->use_max_tr) { 7733 tracing_buffers_release(inode, filp); 7734 return -EBUSY; 7735 } 7736 7737 info->iter.snapshot = true; 7738 info->iter.array_buffer = &info->iter.tr->max_buffer; 7739 7740 return ret; 7741 } 7742 7743 #endif /* CONFIG_TRACER_SNAPSHOT */ 7744 7745 7746 static const struct file_operations tracing_thresh_fops = { 7747 .open = tracing_open_generic, 7748 .read = tracing_thresh_read, 7749 .write = tracing_thresh_write, 7750 .llseek = generic_file_llseek, 7751 }; 7752 7753 #ifdef CONFIG_TRACER_MAX_TRACE 7754 static const struct file_operations tracing_max_lat_fops = { 7755 .open = tracing_open_generic, 7756 .read = tracing_max_lat_read, 7757 .write = tracing_max_lat_write, 7758 .llseek = generic_file_llseek, 7759 }; 7760 #endif 7761 7762 static const struct file_operations set_tracer_fops = { 7763 .open = tracing_open_generic, 7764 .read = tracing_set_trace_read, 7765 .write = tracing_set_trace_write, 7766 .llseek = generic_file_llseek, 7767 }; 7768 7769 static const struct file_operations tracing_pipe_fops = { 7770 .open = tracing_open_pipe, 7771 .poll = tracing_poll_pipe, 7772 .read = tracing_read_pipe, 7773 .splice_read = tracing_splice_read_pipe, 7774 .release = tracing_release_pipe, 7775 .llseek = no_llseek, 7776 }; 7777 7778 static const struct file_operations tracing_entries_fops = { 7779 .open = tracing_open_generic_tr, 7780 .read = tracing_entries_read, 7781 .write = tracing_entries_write, 7782 .llseek = generic_file_llseek, 7783 .release = tracing_release_generic_tr, 7784 }; 7785 7786 static const struct file_operations tracing_total_entries_fops = { 7787 .open = tracing_open_generic_tr, 7788 .read = tracing_total_entries_read, 7789 .llseek = generic_file_llseek, 7790 .release = tracing_release_generic_tr, 7791 }; 7792 7793 static const struct file_operations tracing_free_buffer_fops = { 7794 .open = tracing_open_generic_tr, 7795 .write = tracing_free_buffer_write, 7796 .release = tracing_free_buffer_release, 7797 }; 7798 7799 static const struct file_operations 
tracing_mark_fops = { 7800 .open = tracing_mark_open, 7801 .write = tracing_mark_write, 7802 .release = tracing_release_generic_tr, 7803 }; 7804 7805 static const struct file_operations tracing_mark_raw_fops = { 7806 .open = tracing_mark_open, 7807 .write = tracing_mark_raw_write, 7808 .release = tracing_release_generic_tr, 7809 }; 7810 7811 static const struct file_operations trace_clock_fops = { 7812 .open = tracing_clock_open, 7813 .read = seq_read, 7814 .llseek = seq_lseek, 7815 .release = tracing_single_release_tr, 7816 .write = tracing_clock_write, 7817 }; 7818 7819 static const struct file_operations trace_time_stamp_mode_fops = { 7820 .open = tracing_time_stamp_mode_open, 7821 .read = seq_read, 7822 .llseek = seq_lseek, 7823 .release = tracing_single_release_tr, 7824 }; 7825 7826 #ifdef CONFIG_TRACER_SNAPSHOT 7827 static const struct file_operations snapshot_fops = { 7828 .open = tracing_snapshot_open, 7829 .read = seq_read, 7830 .write = tracing_snapshot_write, 7831 .llseek = tracing_lseek, 7832 .release = tracing_snapshot_release, 7833 }; 7834 7835 static const struct file_operations snapshot_raw_fops = { 7836 .open = snapshot_raw_open, 7837 .read = tracing_buffers_read, 7838 .release = tracing_buffers_release, 7839 .splice_read = tracing_buffers_splice_read, 7840 .llseek = no_llseek, 7841 }; 7842 7843 #endif /* CONFIG_TRACER_SNAPSHOT */ 7844 7845 /* 7846 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct 7847 * @filp: The active open file structure 7848 * @ubuf: The userspace provided buffer to read value into 7849 * @cnt: The maximum number of bytes to read 7850 * @ppos: The current "file" position 7851 * 7852 * This function implements the write interface for a struct trace_min_max_param. 7853 * The filp->private_data must point to a trace_min_max_param structure that 7854 * defines where to write the value, the min and the max acceptable values, 7855 * and a lock to protect the write. 7856 */ 7857 static ssize_t 7858 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) 7859 { 7860 struct trace_min_max_param *param = filp->private_data; 7861 u64 val; 7862 int err; 7863 7864 if (!param) 7865 return -EFAULT; 7866 7867 err = kstrtoull_from_user(ubuf, cnt, 10, &val); 7868 if (err) 7869 return err; 7870 7871 if (param->lock) 7872 mutex_lock(param->lock); 7873 7874 if (param->min && val < *param->min) 7875 err = -EINVAL; 7876 7877 if (param->max && val > *param->max) 7878 err = -EINVAL; 7879 7880 if (!err) 7881 *param->val = val; 7882 7883 if (param->lock) 7884 mutex_unlock(param->lock); 7885 7886 if (err) 7887 return err; 7888 7889 return cnt; 7890 } 7891 7892 /* 7893 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct 7894 * @filp: The active open file structure 7895 * @ubuf: The userspace provided buffer to read value into 7896 * @cnt: The maximum number of bytes to read 7897 * @ppos: The current "file" position 7898 * 7899 * This function implements the read interface for a struct trace_min_max_param. 7900 * The filp->private_data must point to a trace_min_max_param struct with valid 7901 * data. 
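 *
 * As an illustration of how a file using trace_min_max_fops is typically
 * wired up (the my_* names and "parent" below are made up for the example):
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);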
7902 */ 7903 static ssize_t 7904 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 7905 { 7906 struct trace_min_max_param *param = filp->private_data; 7907 char buf[U64_STR_SIZE]; 7908 int len; 7909 u64 val; 7910 7911 if (!param) 7912 return -EFAULT; 7913 7914 val = *param->val; 7915 7916 if (cnt > sizeof(buf)) 7917 cnt = sizeof(buf); 7918 7919 len = snprintf(buf, sizeof(buf), "%llu\n", val); 7920 7921 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 7922 } 7923 7924 const struct file_operations trace_min_max_fops = { 7925 .open = tracing_open_generic, 7926 .read = trace_min_max_read, 7927 .write = trace_min_max_write, 7928 }; 7929 7930 #define TRACING_LOG_ERRS_MAX 8 7931 #define TRACING_LOG_LOC_MAX 128 7932 7933 #define CMD_PREFIX " Command: " 7934 7935 struct err_info { 7936 const char **errs; /* ptr to loc-specific array of err strings */ 7937 u8 type; /* index into errs -> specific err string */ 7938 u16 pos; /* caret position */ 7939 u64 ts; 7940 }; 7941 7942 struct tracing_log_err { 7943 struct list_head list; 7944 struct err_info info; 7945 char loc[TRACING_LOG_LOC_MAX]; /* err location */ 7946 char *cmd; /* what caused err */ 7947 }; 7948 7949 static DEFINE_MUTEX(tracing_err_log_lock); 7950 7951 static struct tracing_log_err *alloc_tracing_log_err(int len) 7952 { 7953 struct tracing_log_err *err; 7954 7955 err = kzalloc(sizeof(*err), GFP_KERNEL); 7956 if (!err) 7957 return ERR_PTR(-ENOMEM); 7958 7959 err->cmd = kzalloc(len, GFP_KERNEL); 7960 if (!err->cmd) { 7961 kfree(err); 7962 return ERR_PTR(-ENOMEM); 7963 } 7964 7965 return err; 7966 } 7967 7968 static void free_tracing_log_err(struct tracing_log_err *err) 7969 { 7970 kfree(err->cmd); 7971 kfree(err); 7972 } 7973 7974 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, 7975 int len) 7976 { 7977 struct tracing_log_err *err; 7978 char *cmd; 7979 7980 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 7981 err = alloc_tracing_log_err(len); 7982 if (PTR_ERR(err) != -ENOMEM) 7983 tr->n_err_log_entries++; 7984 7985 return err; 7986 } 7987 cmd = kzalloc(len, GFP_KERNEL); 7988 if (!cmd) 7989 return ERR_PTR(-ENOMEM); 7990 err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 7991 kfree(err->cmd); 7992 err->cmd = cmd; 7993 list_del(&err->list); 7994 7995 return err; 7996 } 7997 7998 /** 7999 * err_pos - find the position of a string within a command for error careting 8000 * @cmd: The tracing command that caused the error 8001 * @str: The string to position the caret at within @cmd 8002 * 8003 * Finds the position of the first occurrence of @str within @cmd. The 8004 * return value can be passed to tracing_log_err() for caret placement 8005 * within @cmd. 8006 * 8007 * Returns the index within @cmd of the first occurrence of @str or 0 8008 * if @str was not found. 
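 *
 * For example, err_pos("hist:keys=foo", "foo") returns 10, which, passed as
 * @pos, makes tracing_log_err() draw the caret under "foo" in the logged
 * command.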
8009 */ 8010 unsigned int err_pos(char *cmd, const char *str) 8011 { 8012 char *found; 8013 8014 if (WARN_ON(!strlen(cmd))) 8015 return 0; 8016 8017 found = strstr(cmd, str); 8018 if (found) 8019 return found - cmd; 8020 8021 return 0; 8022 } 8023 8024 /** 8025 * tracing_log_err - write an error to the tracing error log 8026 * @tr: The associated trace array for the error (NULL for top level array) 8027 * @loc: A string describing where the error occurred 8028 * @cmd: The tracing command that caused the error 8029 * @errs: The array of loc-specific static error strings 8030 * @type: The index into errs[], which produces the specific static err string 8031 * @pos: The position the caret should be placed in the cmd 8032 * 8033 * Writes an error into tracing/error_log of the form: 8034 * 8035 * <loc>: error: <text> 8036 * Command: <cmd> 8037 * ^ 8038 * 8039 * tracing/error_log is a small log file containing the last 8040 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 8041 * unless there has been a tracing error, and the error log can be 8042 * cleared and have its memory freed by writing the empty string in 8043 * truncation mode to it i.e. echo > tracing/error_log. 8044 * 8045 * NOTE: the @errs array along with the @type param are used to 8046 * produce a static error string - this string is not copied and saved 8047 * when the error is logged - only a pointer to it is saved. See 8048 * existing callers for examples of how static strings are typically 8049 * defined for use with tracing_log_err(). 8050 */ 8051 void tracing_log_err(struct trace_array *tr, 8052 const char *loc, const char *cmd, 8053 const char **errs, u8 type, u16 pos) 8054 { 8055 struct tracing_log_err *err; 8056 int len = 0; 8057 8058 if (!tr) 8059 tr = &global_trace; 8060 8061 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; 8062 8063 mutex_lock(&tracing_err_log_lock); 8064 err = get_tracing_log_err(tr, len); 8065 if (PTR_ERR(err) == -ENOMEM) { 8066 mutex_unlock(&tracing_err_log_lock); 8067 return; 8068 } 8069 8070 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 8071 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); 8072 8073 err->info.errs = errs; 8074 err->info.type = type; 8075 err->info.pos = pos; 8076 err->info.ts = local_clock(); 8077 8078 list_add_tail(&err->list, &tr->err_log); 8079 mutex_unlock(&tracing_err_log_lock); 8080 } 8081 8082 static void clear_tracing_err_log(struct trace_array *tr) 8083 { 8084 struct tracing_log_err *err, *next; 8085 8086 mutex_lock(&tracing_err_log_lock); 8087 list_for_each_entry_safe(err, next, &tr->err_log, list) { 8088 list_del(&err->list); 8089 free_tracing_log_err(err); 8090 } 8091 8092 tr->n_err_log_entries = 0; 8093 mutex_unlock(&tracing_err_log_lock); 8094 } 8095 8096 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 8097 { 8098 struct trace_array *tr = m->private; 8099 8100 mutex_lock(&tracing_err_log_lock); 8101 8102 return seq_list_start(&tr->err_log, *pos); 8103 } 8104 8105 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 8106 { 8107 struct trace_array *tr = m->private; 8108 8109 return seq_list_next(v, &tr->err_log, pos); 8110 } 8111 8112 static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 8113 { 8114 mutex_unlock(&tracing_err_log_lock); 8115 } 8116 8117 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) 8118 { 8119 u16 i; 8120 8121 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 8122 seq_putc(m, ' '); 8123 for (i = 0; i < pos; i++) 8124 
seq_putc(m, ' '); 8125 seq_puts(m, "^\n"); 8126 } 8127 8128 static int tracing_err_log_seq_show(struct seq_file *m, void *v) 8129 { 8130 struct tracing_log_err *err = v; 8131 8132 if (err) { 8133 const char *err_text = err->info.errs[err->info.type]; 8134 u64 sec = err->info.ts; 8135 u32 nsec; 8136 8137 nsec = do_div(sec, NSEC_PER_SEC); 8138 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 8139 err->loc, err_text); 8140 seq_printf(m, "%s", err->cmd); 8141 tracing_err_log_show_pos(m, err->info.pos); 8142 } 8143 8144 return 0; 8145 } 8146 8147 static const struct seq_operations tracing_err_log_seq_ops = { 8148 .start = tracing_err_log_seq_start, 8149 .next = tracing_err_log_seq_next, 8150 .stop = tracing_err_log_seq_stop, 8151 .show = tracing_err_log_seq_show 8152 }; 8153 8154 static int tracing_err_log_open(struct inode *inode, struct file *file) 8155 { 8156 struct trace_array *tr = inode->i_private; 8157 int ret = 0; 8158 8159 ret = tracing_check_open_get_tr(tr); 8160 if (ret) 8161 return ret; 8162 8163 /* If this file was opened for write, then erase contents */ 8164 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 8165 clear_tracing_err_log(tr); 8166 8167 if (file->f_mode & FMODE_READ) { 8168 ret = seq_open(file, &tracing_err_log_seq_ops); 8169 if (!ret) { 8170 struct seq_file *m = file->private_data; 8171 m->private = tr; 8172 } else { 8173 trace_array_put(tr); 8174 } 8175 } 8176 return ret; 8177 } 8178 8179 static ssize_t tracing_err_log_write(struct file *file, 8180 const char __user *buffer, 8181 size_t count, loff_t *ppos) 8182 { 8183 return count; 8184 } 8185 8186 static int tracing_err_log_release(struct inode *inode, struct file *file) 8187 { 8188 struct trace_array *tr = inode->i_private; 8189 8190 trace_array_put(tr); 8191 8192 if (file->f_mode & FMODE_READ) 8193 seq_release(inode, file); 8194 8195 return 0; 8196 } 8197 8198 static const struct file_operations tracing_err_log_fops = { 8199 .open = tracing_err_log_open, 8200 .write = tracing_err_log_write, 8201 .read = seq_read, 8202 .llseek = tracing_lseek, 8203 .release = tracing_err_log_release, 8204 }; 8205 8206 static int tracing_buffers_open(struct inode *inode, struct file *filp) 8207 { 8208 struct trace_array *tr = inode->i_private; 8209 struct ftrace_buffer_info *info; 8210 int ret; 8211 8212 ret = tracing_check_open_get_tr(tr); 8213 if (ret) 8214 return ret; 8215 8216 info = kvzalloc(sizeof(*info), GFP_KERNEL); 8217 if (!info) { 8218 trace_array_put(tr); 8219 return -ENOMEM; 8220 } 8221 8222 mutex_lock(&trace_types_lock); 8223 8224 info->iter.tr = tr; 8225 info->iter.cpu_file = tracing_get_cpu(inode); 8226 info->iter.trace = tr->current_trace; 8227 info->iter.array_buffer = &tr->array_buffer; 8228 info->spare = NULL; 8229 /* Force reading ring buffer for first read */ 8230 info->read = (unsigned int)-1; 8231 8232 filp->private_data = info; 8233 8234 tr->trace_ref++; 8235 8236 mutex_unlock(&trace_types_lock); 8237 8238 ret = nonseekable_open(inode, filp); 8239 if (ret < 0) 8240 trace_array_put(tr); 8241 8242 return ret; 8243 } 8244 8245 static __poll_t 8246 tracing_buffers_poll(struct file *filp, poll_table *poll_table) 8247 { 8248 struct ftrace_buffer_info *info = filp->private_data; 8249 struct trace_iterator *iter = &info->iter; 8250 8251 return trace_poll(iter, filp, poll_table); 8252 } 8253 8254 static ssize_t 8255 tracing_buffers_read(struct file *filp, char __user *ubuf, 8256 size_t count, loff_t *ppos) 8257 { 8258 struct ftrace_buffer_info *info = filp->private_data; 8259 struct trace_iterator 
*iter = &info->iter; 8260 ssize_t ret = 0; 8261 ssize_t size; 8262 8263 if (!count) 8264 return 0; 8265 8266 #ifdef CONFIG_TRACER_MAX_TRACE 8267 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8268 return -EBUSY; 8269 #endif 8270 8271 if (!info->spare) { 8272 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, 8273 iter->cpu_file); 8274 if (IS_ERR(info->spare)) { 8275 ret = PTR_ERR(info->spare); 8276 info->spare = NULL; 8277 } else { 8278 info->spare_cpu = iter->cpu_file; 8279 } 8280 } 8281 if (!info->spare) 8282 return ret; 8283 8284 /* Do we have previous read data to read? */ 8285 if (info->read < PAGE_SIZE) 8286 goto read; 8287 8288 again: 8289 trace_access_lock(iter->cpu_file); 8290 ret = ring_buffer_read_page(iter->array_buffer->buffer, 8291 &info->spare, 8292 count, 8293 iter->cpu_file, 0); 8294 trace_access_unlock(iter->cpu_file); 8295 8296 if (ret < 0) { 8297 if (trace_empty(iter)) { 8298 if ((filp->f_flags & O_NONBLOCK)) 8299 return -EAGAIN; 8300 8301 ret = wait_on_pipe(iter, 0); 8302 if (ret) 8303 return ret; 8304 8305 goto again; 8306 } 8307 return 0; 8308 } 8309 8310 info->read = 0; 8311 read: 8312 size = PAGE_SIZE - info->read; 8313 if (size > count) 8314 size = count; 8315 8316 ret = copy_to_user(ubuf, info->spare + info->read, size); 8317 if (ret == size) 8318 return -EFAULT; 8319 8320 size -= ret; 8321 8322 *ppos += size; 8323 info->read += size; 8324 8325 return size; 8326 } 8327 8328 static int tracing_buffers_release(struct inode *inode, struct file *file) 8329 { 8330 struct ftrace_buffer_info *info = file->private_data; 8331 struct trace_iterator *iter = &info->iter; 8332 8333 mutex_lock(&trace_types_lock); 8334 8335 iter->tr->trace_ref--; 8336 8337 __trace_array_put(iter->tr); 8338 8339 iter->wait_index++; 8340 /* Make sure the waiters see the new wait_index */ 8341 smp_wmb(); 8342 8343 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8344 8345 if (info->spare) 8346 ring_buffer_free_read_page(iter->array_buffer->buffer, 8347 info->spare_cpu, info->spare); 8348 kvfree(info); 8349 8350 mutex_unlock(&trace_types_lock); 8351 8352 return 0; 8353 } 8354 8355 struct buffer_ref { 8356 struct trace_buffer *buffer; 8357 void *page; 8358 int cpu; 8359 refcount_t refcount; 8360 }; 8361 8362 static void buffer_ref_release(struct buffer_ref *ref) 8363 { 8364 if (!refcount_dec_and_test(&ref->refcount)) 8365 return; 8366 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); 8367 kfree(ref); 8368 } 8369 8370 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, 8371 struct pipe_buffer *buf) 8372 { 8373 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8374 8375 buffer_ref_release(ref); 8376 buf->private = 0; 8377 } 8378 8379 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, 8380 struct pipe_buffer *buf) 8381 { 8382 struct buffer_ref *ref = (struct buffer_ref *)buf->private; 8383 8384 if (refcount_read(&ref->refcount) > INT_MAX/2) 8385 return false; 8386 8387 refcount_inc(&ref->refcount); 8388 return true; 8389 } 8390 8391 /* Pipe buffer operations for a buffer. */ 8392 static const struct pipe_buf_operations buffer_pipe_buf_ops = { 8393 .release = buffer_pipe_buf_release, 8394 .get = buffer_pipe_buf_get, 8395 }; 8396 8397 /* 8398 * Callback from splice_to_pipe(), if we need to release some pages 8399 * at the end of the spd in case we error'ed out in filling the pipe. 
8400 */ 8401 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) 8402 { 8403 struct buffer_ref *ref = 8404 (struct buffer_ref *)spd->partial[i].private; 8405 8406 buffer_ref_release(ref); 8407 spd->partial[i].private = 0; 8408 } 8409 8410 static ssize_t 8411 tracing_buffers_splice_read(struct file *file, loff_t *ppos, 8412 struct pipe_inode_info *pipe, size_t len, 8413 unsigned int flags) 8414 { 8415 struct ftrace_buffer_info *info = file->private_data; 8416 struct trace_iterator *iter = &info->iter; 8417 struct partial_page partial_def[PIPE_DEF_BUFFERS]; 8418 struct page *pages_def[PIPE_DEF_BUFFERS]; 8419 struct splice_pipe_desc spd = { 8420 .pages = pages_def, 8421 .partial = partial_def, 8422 .nr_pages_max = PIPE_DEF_BUFFERS, 8423 .ops = &buffer_pipe_buf_ops, 8424 .spd_release = buffer_spd_release, 8425 }; 8426 struct buffer_ref *ref; 8427 int entries, i; 8428 ssize_t ret = 0; 8429 8430 #ifdef CONFIG_TRACER_MAX_TRACE 8431 if (iter->snapshot && iter->tr->current_trace->use_max_tr) 8432 return -EBUSY; 8433 #endif 8434 8435 if (*ppos & (PAGE_SIZE - 1)) 8436 return -EINVAL; 8437 8438 if (len & (PAGE_SIZE - 1)) { 8439 if (len < PAGE_SIZE) 8440 return -EINVAL; 8441 len &= PAGE_MASK; 8442 } 8443 8444 if (splice_grow_spd(pipe, &spd)) 8445 return -ENOMEM; 8446 8447 again: 8448 trace_access_lock(iter->cpu_file); 8449 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8450 8451 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { 8452 struct page *page; 8453 int r; 8454 8455 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 8456 if (!ref) { 8457 ret = -ENOMEM; 8458 break; 8459 } 8460 8461 refcount_set(&ref->refcount, 1); 8462 ref->buffer = iter->array_buffer->buffer; 8463 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 8464 if (IS_ERR(ref->page)) { 8465 ret = PTR_ERR(ref->page); 8466 ref->page = NULL; 8467 kfree(ref); 8468 break; 8469 } 8470 ref->cpu = iter->cpu_file; 8471 8472 r = ring_buffer_read_page(ref->buffer, &ref->page, 8473 len, iter->cpu_file, 1); 8474 if (r < 0) { 8475 ring_buffer_free_read_page(ref->buffer, ref->cpu, 8476 ref->page); 8477 kfree(ref); 8478 break; 8479 } 8480 8481 page = virt_to_page(ref->page); 8482 8483 spd.pages[i] = page; 8484 spd.partial[i].len = PAGE_SIZE; 8485 spd.partial[i].offset = 0; 8486 spd.partial[i].private = (unsigned long)ref; 8487 spd.nr_pages++; 8488 *ppos += PAGE_SIZE; 8489 8490 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); 8491 } 8492 8493 trace_access_unlock(iter->cpu_file); 8494 spd.nr_pages = i; 8495 8496 /* did we read anything? 
*/ 8497 if (!spd.nr_pages) { 8498 long wait_index; 8499 8500 if (ret) 8501 goto out; 8502 8503 ret = -EAGAIN; 8504 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 8505 goto out; 8506 8507 wait_index = READ_ONCE(iter->wait_index); 8508 8509 ret = wait_on_pipe(iter, iter->tr->buffer_percent); 8510 if (ret) 8511 goto out; 8512 8513 /* No need to wait after waking up when tracing is off */ 8514 if (!tracer_tracing_is_on(iter->tr)) 8515 goto out; 8516 8517 /* Make sure we see the new wait_index */ 8518 smp_rmb(); 8519 if (wait_index != iter->wait_index) 8520 goto out; 8521 8522 goto again; 8523 } 8524 8525 ret = splice_to_pipe(pipe, &spd); 8526 out: 8527 splice_shrink_spd(&spd); 8528 8529 return ret; 8530 } 8531 8532 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */ 8533 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 8534 { 8535 struct ftrace_buffer_info *info = file->private_data; 8536 struct trace_iterator *iter = &info->iter; 8537 8538 if (cmd) 8539 return -ENOIOCTLCMD; 8540 8541 mutex_lock(&trace_types_lock); 8542 8543 iter->wait_index++; 8544 /* Make sure the waiters see the new wait_index */ 8545 smp_wmb(); 8546 8547 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); 8548 8549 mutex_unlock(&trace_types_lock); 8550 return 0; 8551 } 8552 8553 static const struct file_operations tracing_buffers_fops = { 8554 .open = tracing_buffers_open, 8555 .read = tracing_buffers_read, 8556 .poll = tracing_buffers_poll, 8557 .release = tracing_buffers_release, 8558 .splice_read = tracing_buffers_splice_read, 8559 .unlocked_ioctl = tracing_buffers_ioctl, 8560 .llseek = no_llseek, 8561 }; 8562 8563 static ssize_t 8564 tracing_stats_read(struct file *filp, char __user *ubuf, 8565 size_t count, loff_t *ppos) 8566 { 8567 struct inode *inode = file_inode(filp); 8568 struct trace_array *tr = inode->i_private; 8569 struct array_buffer *trace_buf = &tr->array_buffer; 8570 int cpu = tracing_get_cpu(inode); 8571 struct trace_seq *s; 8572 unsigned long cnt; 8573 unsigned long long t; 8574 unsigned long usec_rem; 8575 8576 s = kmalloc(sizeof(*s), GFP_KERNEL); 8577 if (!s) 8578 return -ENOMEM; 8579 8580 trace_seq_init(s); 8581 8582 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); 8583 trace_seq_printf(s, "entries: %ld\n", cnt); 8584 8585 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); 8586 trace_seq_printf(s, "overrun: %ld\n", cnt); 8587 8588 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); 8589 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 8590 8591 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); 8592 trace_seq_printf(s, "bytes: %ld\n", cnt); 8593 8594 if (trace_clocks[tr->clock_id].in_ns) { 8595 /* local or global for trace_clock */ 8596 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8597 usec_rem = do_div(t, USEC_PER_SEC); 8598 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", 8599 t, usec_rem); 8600 8601 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); 8602 usec_rem = do_div(t, USEC_PER_SEC); 8603 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); 8604 } else { 8605 /* counter or tsc mode for trace_clock */ 8606 trace_seq_printf(s, "oldest event ts: %llu\n", 8607 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); 8608 8609 trace_seq_printf(s, "now ts: %llu\n", 8610 ring_buffer_time_stamp(trace_buf->buffer)); 8611 } 8612 8613 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); 8614 trace_seq_printf(s, "dropped events: 
%ld\n", cnt); 8615 8616 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); 8617 trace_seq_printf(s, "read events: %ld\n", cnt); 8618 8619 count = simple_read_from_buffer(ubuf, count, ppos, 8620 s->buffer, trace_seq_used(s)); 8621 8622 kfree(s); 8623 8624 return count; 8625 } 8626 8627 static const struct file_operations tracing_stats_fops = { 8628 .open = tracing_open_generic_tr, 8629 .read = tracing_stats_read, 8630 .llseek = generic_file_llseek, 8631 .release = tracing_release_generic_tr, 8632 }; 8633 8634 #ifdef CONFIG_DYNAMIC_FTRACE 8635 8636 static ssize_t 8637 tracing_read_dyn_info(struct file *filp, char __user *ubuf, 8638 size_t cnt, loff_t *ppos) 8639 { 8640 ssize_t ret; 8641 char *buf; 8642 int r; 8643 8644 /* 256 should be plenty to hold the amount needed */ 8645 buf = kmalloc(256, GFP_KERNEL); 8646 if (!buf) 8647 return -ENOMEM; 8648 8649 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n", 8650 ftrace_update_tot_cnt, 8651 ftrace_number_of_pages, 8652 ftrace_number_of_groups); 8653 8654 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 8655 kfree(buf); 8656 return ret; 8657 } 8658 8659 static const struct file_operations tracing_dyn_info_fops = { 8660 .open = tracing_open_generic, 8661 .read = tracing_read_dyn_info, 8662 .llseek = generic_file_llseek, 8663 }; 8664 #endif /* CONFIG_DYNAMIC_FTRACE */ 8665 8666 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) 8667 static void 8668 ftrace_snapshot(unsigned long ip, unsigned long parent_ip, 8669 struct trace_array *tr, struct ftrace_probe_ops *ops, 8670 void *data) 8671 { 8672 tracing_snapshot_instance(tr); 8673 } 8674 8675 static void 8676 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, 8677 struct trace_array *tr, struct ftrace_probe_ops *ops, 8678 void *data) 8679 { 8680 struct ftrace_func_mapper *mapper = data; 8681 long *count = NULL; 8682 8683 if (mapper) 8684 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8685 8686 if (count) { 8687 8688 if (*count <= 0) 8689 return; 8690 8691 (*count)--; 8692 } 8693 8694 tracing_snapshot_instance(tr); 8695 } 8696 8697 static int 8698 ftrace_snapshot_print(struct seq_file *m, unsigned long ip, 8699 struct ftrace_probe_ops *ops, void *data) 8700 { 8701 struct ftrace_func_mapper *mapper = data; 8702 long *count = NULL; 8703 8704 seq_printf(m, "%ps:", (void *)ip); 8705 8706 seq_puts(m, "snapshot"); 8707 8708 if (mapper) 8709 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); 8710 8711 if (count) 8712 seq_printf(m, ":count=%ld\n", *count); 8713 else 8714 seq_puts(m, ":unlimited\n"); 8715 8716 return 0; 8717 } 8718 8719 static int 8720 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 8721 unsigned long ip, void *init_data, void **data) 8722 { 8723 struct ftrace_func_mapper *mapper = *data; 8724 8725 if (!mapper) { 8726 mapper = allocate_ftrace_func_mapper(); 8727 if (!mapper) 8728 return -ENOMEM; 8729 *data = mapper; 8730 } 8731 8732 return ftrace_func_mapper_add_ip(mapper, ip, init_data); 8733 } 8734 8735 static void 8736 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 8737 unsigned long ip, void *data) 8738 { 8739 struct ftrace_func_mapper *mapper = data; 8740 8741 if (!ip) { 8742 if (!mapper) 8743 return; 8744 free_ftrace_func_mapper(mapper, NULL); 8745 return; 8746 } 8747 8748 ftrace_func_mapper_remove_ip(mapper, ip); 8749 } 8750 8751 static struct ftrace_probe_ops snapshot_probe_ops = { 8752 .func = ftrace_snapshot, 8753 .print = ftrace_snapshot_print, 8754 }; 8755 8756 
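/* * snapshot_probe_ops above backs the plain "snapshot" function command, and * snapshot_count_probe_ops below backs the counted variant. Illustrative usage * via set_ftrace_filter (assuming "schedule" is a traceable function on the * running kernel): * * echo 'schedule:snapshot' > set_ftrace_filter # snapshot on every hit * echo 'schedule:snapshot:5' > set_ftrace_filter # only the first 5 hits * * A leading '!' on the same string removes a previously installed probe. The * ":<count>" modifier is parsed in ftrace_trace_snapshot_callback() below. */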
static struct ftrace_probe_ops snapshot_count_probe_ops = { 8757 .func = ftrace_count_snapshot, 8758 .print = ftrace_snapshot_print, 8759 .init = ftrace_snapshot_init, 8760 .free = ftrace_snapshot_free, 8761 }; 8762 8763 static int 8764 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, 8765 char *glob, char *cmd, char *param, int enable) 8766 { 8767 struct ftrace_probe_ops *ops; 8768 void *count = (void *)-1; 8769 char *number; 8770 int ret; 8771 8772 if (!tr) 8773 return -ENODEV; 8774 8775 /* hash funcs only work with set_ftrace_filter */ 8776 if (!enable) 8777 return -EINVAL; 8778 8779 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; 8780 8781 if (glob[0] == '!') 8782 return unregister_ftrace_function_probe_func(glob+1, tr, ops); 8783 8784 if (!param) 8785 goto out_reg; 8786 8787 number = strsep(&param, ":"); 8788 8789 if (!strlen(number)) 8790 goto out_reg; 8791 8792 /* 8793 * We use the callback data field (which is a pointer) 8794 * as our counter. 8795 */ 8796 ret = kstrtoul(number, 0, (unsigned long *)&count); 8797 if (ret) 8798 return ret; 8799 8800 out_reg: 8801 ret = tracing_alloc_snapshot_instance(tr); 8802 if (ret < 0) 8803 goto out; 8804 8805 ret = register_ftrace_function_probe(glob, tr, ops, count); 8806 8807 out: 8808 return ret < 0 ? ret : 0; 8809 } 8810 8811 static struct ftrace_func_command ftrace_snapshot_cmd = { 8812 .name = "snapshot", 8813 .func = ftrace_trace_snapshot_callback, 8814 }; 8815 8816 static __init int register_snapshot_cmd(void) 8817 { 8818 return register_ftrace_command(&ftrace_snapshot_cmd); 8819 } 8820 #else 8821 static inline __init int register_snapshot_cmd(void) { return 0; } 8822 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 8823 8824 static struct dentry *tracing_get_dentry(struct trace_array *tr) 8825 { 8826 if (WARN_ON(!tr->dir)) 8827 return ERR_PTR(-ENODEV); 8828 8829 /* Top directory uses NULL as the parent */ 8830 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) 8831 return NULL; 8832 8833 /* All sub buffers have a descriptor */ 8834 return tr->dir; 8835 } 8836 8837 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) 8838 { 8839 struct dentry *d_tracer; 8840 8841 if (tr->percpu_dir) 8842 return tr->percpu_dir; 8843 8844 d_tracer = tracing_get_dentry(tr); 8845 if (IS_ERR(d_tracer)) 8846 return NULL; 8847 8848 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); 8849 8850 MEM_FAIL(!tr->percpu_dir, 8851 "Could not create tracefs directory 'per_cpu/%d'\n", cpu); 8852 8853 return tr->percpu_dir; 8854 } 8855 8856 static struct dentry * 8857 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, 8858 void *data, long cpu, const struct file_operations *fops) 8859 { 8860 struct dentry *ret = trace_create_file(name, mode, parent, data, fops); 8861 8862 if (ret) /* See tracing_get_cpu() */ 8863 d_inode(ret)->i_cdev = (void *)(cpu + 1); 8864 return ret; 8865 } 8866 8867 static void 8868 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) 8869 { 8870 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); 8871 struct dentry *d_cpu; 8872 char cpu_dir[30]; /* 30 characters should be more than enough */ 8873 8874 if (!d_percpu) 8875 return; 8876 8877 snprintf(cpu_dir, 30, "cpu%ld", cpu); 8878 d_cpu = tracefs_create_dir(cpu_dir, d_percpu); 8879 if (!d_cpu) { 8880 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); 8881 return; 8882 } 8883 8884 /* per cpu trace_pipe */ 8885 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ,
d_cpu, 8886 tr, cpu, &tracing_pipe_fops); 8887 8888 /* per cpu trace */ 8889 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, 8890 tr, cpu, &tracing_fops); 8891 8892 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, 8893 tr, cpu, &tracing_buffers_fops); 8894 8895 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, 8896 tr, cpu, &tracing_stats_fops); 8897 8898 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, 8899 tr, cpu, &tracing_entries_fops); 8900 8901 #ifdef CONFIG_TRACER_SNAPSHOT 8902 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, 8903 tr, cpu, &snapshot_fops); 8904 8905 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, 8906 tr, cpu, &snapshot_raw_fops); 8907 #endif 8908 } 8909 8910 #ifdef CONFIG_FTRACE_SELFTEST 8911 /* Let selftest have access to static functions in this file */ 8912 #include "trace_selftest.c" 8913 #endif 8914 8915 static ssize_t 8916 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 8917 loff_t *ppos) 8918 { 8919 struct trace_option_dentry *topt = filp->private_data; 8920 char *buf; 8921 8922 if (topt->flags->val & topt->opt->bit) 8923 buf = "1\n"; 8924 else 8925 buf = "0\n"; 8926 8927 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 8928 } 8929 8930 static ssize_t 8931 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, 8932 loff_t *ppos) 8933 { 8934 struct trace_option_dentry *topt = filp->private_data; 8935 unsigned long val; 8936 int ret; 8937 8938 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 8939 if (ret) 8940 return ret; 8941 8942 if (val != 0 && val != 1) 8943 return -EINVAL; 8944 8945 if (!!(topt->flags->val & topt->opt->bit) != val) { 8946 mutex_lock(&trace_types_lock); 8947 ret = __set_tracer_option(topt->tr, topt->flags, 8948 topt->opt, !val); 8949 mutex_unlock(&trace_types_lock); 8950 if (ret) 8951 return ret; 8952 } 8953 8954 *ppos += cnt; 8955 8956 return cnt; 8957 } 8958 8959 8960 static const struct file_operations trace_options_fops = { 8961 .open = tracing_open_generic, 8962 .read = trace_options_read, 8963 .write = trace_options_write, 8964 .llseek = generic_file_llseek, 8965 }; 8966 8967 /* 8968 * In order to pass in both the trace_array descriptor as well as the index 8969 * to the flag that the trace option file represents, the trace_array 8970 * has a character array of trace_flags_index[], which holds the index 8971 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 8972 * The address of this character array is passed to the flag option file 8973 * read/write callbacks. 8974 * 8975 * In order to extract both the index and the trace_array descriptor, 8976 * get_tr_index() uses the following algorithm. 8977 * 8978 * idx = *ptr; 8979 * 8980 * As the pointer itself contains the address of the index (remember 8981 * index[1] == 1). 8982 * 8983 * Then to get the trace_array descriptor, by subtracting that index 8984 * from the ptr, we get to the start of the index itself. 8985 * 8986 * ptr - idx == &index[0] 8987 * 8988 * Then a simple container_of() from that pointer gets us to the 8989 * trace_array descriptor. 
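* * As a worked example (values only for illustration): the options file for flag bit 3 is created with data == &tr->trace_flags_index[3], and init_trace_flags_index() stores index[i] == i, so that byte holds 3. Then idx = *ptr == 3, ptr - 3 == &tr->trace_flags_index[0], and container_of() on that address recovers tr.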
8990 */ 8991 static void get_tr_index(void *data, struct trace_array **ptr, 8992 unsigned int *pindex) 8993 { 8994 *pindex = *(unsigned char *)data; 8995 8996 *ptr = container_of(data - *pindex, struct trace_array, 8997 trace_flags_index); 8998 } 8999 9000 static ssize_t 9001 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 9002 loff_t *ppos) 9003 { 9004 void *tr_index = filp->private_data; 9005 struct trace_array *tr; 9006 unsigned int index; 9007 char *buf; 9008 9009 get_tr_index(tr_index, &tr, &index); 9010 9011 if (tr->trace_flags & (1 << index)) 9012 buf = "1\n"; 9013 else 9014 buf = "0\n"; 9015 9016 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 9017 } 9018 9019 static ssize_t 9020 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 9021 loff_t *ppos) 9022 { 9023 void *tr_index = filp->private_data; 9024 struct trace_array *tr; 9025 unsigned int index; 9026 unsigned long val; 9027 int ret; 9028 9029 get_tr_index(tr_index, &tr, &index); 9030 9031 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9032 if (ret) 9033 return ret; 9034 9035 if (val != 0 && val != 1) 9036 return -EINVAL; 9037 9038 mutex_lock(&event_mutex); 9039 mutex_lock(&trace_types_lock); 9040 ret = set_tracer_flag(tr, 1 << index, val); 9041 mutex_unlock(&trace_types_lock); 9042 mutex_unlock(&event_mutex); 9043 9044 if (ret < 0) 9045 return ret; 9046 9047 *ppos += cnt; 9048 9049 return cnt; 9050 } 9051 9052 static const struct file_operations trace_options_core_fops = { 9053 .open = tracing_open_generic, 9054 .read = trace_options_core_read, 9055 .write = trace_options_core_write, 9056 .llseek = generic_file_llseek, 9057 }; 9058 9059 struct dentry *trace_create_file(const char *name, 9060 umode_t mode, 9061 struct dentry *parent, 9062 void *data, 9063 const struct file_operations *fops) 9064 { 9065 struct dentry *ret; 9066 9067 ret = tracefs_create_file(name, mode, parent, data, fops); 9068 if (!ret) 9069 pr_warn("Could not create tracefs '%s' entry\n", name); 9070 9071 return ret; 9072 } 9073 9074 9075 static struct dentry *trace_options_init_dentry(struct trace_array *tr) 9076 { 9077 struct dentry *d_tracer; 9078 9079 if (tr->options) 9080 return tr->options; 9081 9082 d_tracer = tracing_get_dentry(tr); 9083 if (IS_ERR(d_tracer)) 9084 return NULL; 9085 9086 tr->options = tracefs_create_dir("options", d_tracer); 9087 if (!tr->options) { 9088 pr_warn("Could not create tracefs directory 'options'\n"); 9089 return NULL; 9090 } 9091 9092 return tr->options; 9093 } 9094 9095 static void 9096 create_trace_option_file(struct trace_array *tr, 9097 struct trace_option_dentry *topt, 9098 struct tracer_flags *flags, 9099 struct tracer_opt *opt) 9100 { 9101 struct dentry *t_options; 9102 9103 t_options = trace_options_init_dentry(tr); 9104 if (!t_options) 9105 return; 9106 9107 topt->flags = flags; 9108 topt->opt = opt; 9109 topt->tr = tr; 9110 9111 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, 9112 t_options, topt, &trace_options_fops); 9113 9114 } 9115 9116 static void 9117 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 9118 { 9119 struct trace_option_dentry *topts; 9120 struct trace_options *tr_topts; 9121 struct tracer_flags *flags; 9122 struct tracer_opt *opts; 9123 int cnt; 9124 int i; 9125 9126 if (!tracer) 9127 return; 9128 9129 flags = tracer->flags; 9130 9131 if (!flags || !flags->opts) 9132 return; 9133 9134 /* 9135 * If this is an instance, only create flags for tracers 9136 * the instance may have. 
9137 */ 9138 if (!trace_ok_for_array(tracer, tr)) 9139 return; 9140 9141 for (i = 0; i < tr->nr_topts; i++) { 9142 /* Make sure there's no duplicate flags. */ 9143 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) 9144 return; 9145 } 9146 9147 opts = flags->opts; 9148 9149 for (cnt = 0; opts[cnt].name; cnt++) 9150 ; 9151 9152 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 9153 if (!topts) 9154 return; 9155 9156 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 9157 GFP_KERNEL); 9158 if (!tr_topts) { 9159 kfree(topts); 9160 return; 9161 } 9162 9163 tr->topts = tr_topts; 9164 tr->topts[tr->nr_topts].tracer = tracer; 9165 tr->topts[tr->nr_topts].topts = topts; 9166 tr->nr_topts++; 9167 9168 for (cnt = 0; opts[cnt].name; cnt++) { 9169 create_trace_option_file(tr, &topts[cnt], flags, 9170 &opts[cnt]); 9171 MEM_FAIL(topts[cnt].entry == NULL, 9172 "Failed to create trace option: %s", 9173 opts[cnt].name); 9174 } 9175 } 9176 9177 static struct dentry * 9178 create_trace_option_core_file(struct trace_array *tr, 9179 const char *option, long index) 9180 { 9181 struct dentry *t_options; 9182 9183 t_options = trace_options_init_dentry(tr); 9184 if (!t_options) 9185 return NULL; 9186 9187 return trace_create_file(option, TRACE_MODE_WRITE, t_options, 9188 (void *)&tr->trace_flags_index[index], 9189 &trace_options_core_fops); 9190 } 9191 9192 static void create_trace_options_dir(struct trace_array *tr) 9193 { 9194 struct dentry *t_options; 9195 bool top_level = tr == &global_trace; 9196 int i; 9197 9198 t_options = trace_options_init_dentry(tr); 9199 if (!t_options) 9200 return; 9201 9202 for (i = 0; trace_options[i]; i++) { 9203 if (top_level || 9204 !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 9205 create_trace_option_core_file(tr, trace_options[i], i); 9206 } 9207 } 9208 9209 static ssize_t 9210 rb_simple_read(struct file *filp, char __user *ubuf, 9211 size_t cnt, loff_t *ppos) 9212 { 9213 struct trace_array *tr = filp->private_data; 9214 char buf[64]; 9215 int r; 9216 9217 r = tracer_tracing_is_on(tr); 9218 r = sprintf(buf, "%d\n", r); 9219 9220 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9221 } 9222 9223 static ssize_t 9224 rb_simple_write(struct file *filp, const char __user *ubuf, 9225 size_t cnt, loff_t *ppos) 9226 { 9227 struct trace_array *tr = filp->private_data; 9228 struct trace_buffer *buffer = tr->array_buffer.buffer; 9229 unsigned long val; 9230 int ret; 9231 9232 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9233 if (ret) 9234 return ret; 9235 9236 if (buffer) { 9237 mutex_lock(&trace_types_lock); 9238 if (!!val == tracer_tracing_is_on(tr)) { 9239 val = 0; /* do nothing */ 9240 } else if (val) { 9241 tracer_tracing_on(tr); 9242 if (tr->current_trace->start) 9243 tr->current_trace->start(tr); 9244 } else { 9245 tracer_tracing_off(tr); 9246 if (tr->current_trace->stop) 9247 tr->current_trace->stop(tr); 9248 /* Wake up any waiters */ 9249 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); 9250 } 9251 mutex_unlock(&trace_types_lock); 9252 } 9253 9254 (*ppos)++; 9255 9256 return cnt; 9257 } 9258 9259 static const struct file_operations rb_simple_fops = { 9260 .open = tracing_open_generic_tr, 9261 .read = rb_simple_read, 9262 .write = rb_simple_write, 9263 .release = tracing_release_generic_tr, 9264 .llseek = default_llseek, 9265 }; 9266 9267 static ssize_t 9268 buffer_percent_read(struct file *filp, char __user *ubuf, 9269 size_t cnt, loff_t *ppos) 9270 { 9271 struct trace_array *tr = filp->private_data; 9272 char buf[64]; 9273 int r; 9274 
9275 r = tr->buffer_percent; 9276 r = sprintf(buf, "%d\n", r); 9277 9278 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 9279 } 9280 9281 static ssize_t 9282 buffer_percent_write(struct file *filp, const char __user *ubuf, 9283 size_t cnt, loff_t *ppos) 9284 { 9285 struct trace_array *tr = filp->private_data; 9286 unsigned long val; 9287 int ret; 9288 9289 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 9290 if (ret) 9291 return ret; 9292 9293 if (val > 100) 9294 return -EINVAL; 9295 9296 tr->buffer_percent = val; 9297 9298 (*ppos)++; 9299 9300 return cnt; 9301 } 9302 9303 static const struct file_operations buffer_percent_fops = { 9304 .open = tracing_open_generic_tr, 9305 .read = buffer_percent_read, 9306 .write = buffer_percent_write, 9307 .release = tracing_release_generic_tr, 9308 .llseek = default_llseek, 9309 }; 9310 9311 static struct dentry *trace_instance_dir; 9312 9313 static void 9314 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); 9315 9316 static int 9317 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) 9318 { 9319 enum ring_buffer_flags rb_flags; 9320 9321 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 9322 9323 buf->tr = tr; 9324 9325 buf->buffer = ring_buffer_alloc(size, rb_flags); 9326 if (!buf->buffer) 9327 return -ENOMEM; 9328 9329 buf->data = alloc_percpu(struct trace_array_cpu); 9330 if (!buf->data) { 9331 ring_buffer_free(buf->buffer); 9332 buf->buffer = NULL; 9333 return -ENOMEM; 9334 } 9335 9336 /* Allocate the first page for all buffers */ 9337 set_buffer_entries(&tr->array_buffer, 9338 ring_buffer_size(tr->array_buffer.buffer, 0)); 9339 9340 return 0; 9341 } 9342 9343 static void free_trace_buffer(struct array_buffer *buf) 9344 { 9345 if (buf->buffer) { 9346 ring_buffer_free(buf->buffer); 9347 buf->buffer = NULL; 9348 free_percpu(buf->data); 9349 buf->data = NULL; 9350 } 9351 } 9352 9353 static int allocate_trace_buffers(struct trace_array *tr, int size) 9354 { 9355 int ret; 9356 9357 ret = allocate_trace_buffer(tr, &tr->array_buffer, size); 9358 if (ret) 9359 return ret; 9360 9361 #ifdef CONFIG_TRACER_MAX_TRACE 9362 ret = allocate_trace_buffer(tr, &tr->max_buffer, 9363 allocate_snapshot ? 
size : 1); 9364 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { 9365 free_trace_buffer(&tr->array_buffer); 9366 return -ENOMEM; 9367 } 9368 tr->allocated_snapshot = allocate_snapshot; 9369 9370 allocate_snapshot = false; 9371 #endif 9372 9373 return 0; 9374 } 9375 9376 static void free_trace_buffers(struct trace_array *tr) 9377 { 9378 if (!tr) 9379 return; 9380 9381 free_trace_buffer(&tr->array_buffer); 9382 9383 #ifdef CONFIG_TRACER_MAX_TRACE 9384 free_trace_buffer(&tr->max_buffer); 9385 #endif 9386 } 9387 9388 static void init_trace_flags_index(struct trace_array *tr) 9389 { 9390 int i; 9391 9392 /* Used by the trace options files */ 9393 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 9394 tr->trace_flags_index[i] = i; 9395 } 9396 9397 static void __update_tracer_options(struct trace_array *tr) 9398 { 9399 struct tracer *t; 9400 9401 for (t = trace_types; t; t = t->next) 9402 add_tracer_options(tr, t); 9403 } 9404 9405 static void update_tracer_options(struct trace_array *tr) 9406 { 9407 mutex_lock(&trace_types_lock); 9408 tracer_options_updated = true; 9409 __update_tracer_options(tr); 9410 mutex_unlock(&trace_types_lock); 9411 } 9412 9413 /* Must have trace_types_lock held */ 9414 struct trace_array *trace_array_find(const char *instance) 9415 { 9416 struct trace_array *tr, *found = NULL; 9417 9418 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9419 if (tr->name && strcmp(tr->name, instance) == 0) { 9420 found = tr; 9421 break; 9422 } 9423 } 9424 9425 return found; 9426 } 9427 9428 struct trace_array *trace_array_find_get(const char *instance) 9429 { 9430 struct trace_array *tr; 9431 9432 mutex_lock(&trace_types_lock); 9433 tr = trace_array_find(instance); 9434 if (tr) 9435 tr->ref++; 9436 mutex_unlock(&trace_types_lock); 9437 9438 return tr; 9439 } 9440 9441 static int trace_array_create_dir(struct trace_array *tr) 9442 { 9443 int ret; 9444 9445 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); 9446 if (!tr->dir) 9447 return -EINVAL; 9448 9449 ret = event_trace_add_tracer(tr->dir, tr); 9450 if (ret) { 9451 tracefs_remove(tr->dir); 9452 return ret; 9453 } 9454 9455 init_tracer_tracefs(tr, tr->dir); 9456 __update_tracer_options(tr); 9457 9458 return ret; 9459 } 9460 9461 static struct trace_array *trace_array_create(const char *name) 9462 { 9463 struct trace_array *tr; 9464 int ret; 9465 9466 ret = -ENOMEM; 9467 tr = kzalloc(sizeof(*tr), GFP_KERNEL); 9468 if (!tr) 9469 return ERR_PTR(ret); 9470 9471 tr->name = kstrdup(name, GFP_KERNEL); 9472 if (!tr->name) 9473 goto out_free_tr; 9474 9475 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 9476 goto out_free_tr; 9477 9478 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) 9479 goto out_free_tr; 9480 9481 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; 9482 9483 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 9484 9485 raw_spin_lock_init(&tr->start_lock); 9486 9487 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 9488 9489 tr->current_trace = &nop_trace; 9490 9491 INIT_LIST_HEAD(&tr->systems); 9492 INIT_LIST_HEAD(&tr->events); 9493 INIT_LIST_HEAD(&tr->hist_vars); 9494 INIT_LIST_HEAD(&tr->err_log); 9495 9496 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 9497 goto out_free_tr; 9498 9499 if (ftrace_allocate_ftrace_ops(tr) < 0) 9500 goto out_free_tr; 9501 9502 ftrace_init_trace_array(tr); 9503 9504 init_trace_flags_index(tr); 9505 9506 if (trace_instance_dir) { 9507 ret = trace_array_create_dir(tr); 9508 if (ret) 9509 goto out_free_tr; 9510 } else 9511 
__trace_early_add_events(tr); 9512 9513 list_add(&tr->list, &ftrace_trace_arrays); 9514 9515 tr->ref++; 9516 9517 return tr; 9518 9519 out_free_tr: 9520 ftrace_free_ftrace_ops(tr); 9521 free_trace_buffers(tr); 9522 free_cpumask_var(tr->pipe_cpumask); 9523 free_cpumask_var(tr->tracing_cpumask); 9524 kfree(tr->name); 9525 kfree(tr); 9526 9527 return ERR_PTR(ret); 9528 } 9529 9530 static int instance_mkdir(const char *name) 9531 { 9532 struct trace_array *tr; 9533 int ret; 9534 9535 mutex_lock(&event_mutex); 9536 mutex_lock(&trace_types_lock); 9537 9538 ret = -EEXIST; 9539 if (trace_array_find(name)) 9540 goto out_unlock; 9541 9542 tr = trace_array_create(name); 9543 9544 ret = PTR_ERR_OR_ZERO(tr); 9545 9546 out_unlock: 9547 mutex_unlock(&trace_types_lock); 9548 mutex_unlock(&event_mutex); 9549 return ret; 9550 } 9551 9552 /** 9553 * trace_array_get_by_name - Create/Lookup a trace array, given its name. 9554 * @name: The name of the trace array to be looked up/created. 9555 * 9556 * Returns pointer to trace array with given name. 9557 * NULL, if it cannot be created. 9558 * 9559 * NOTE: This function increments the reference counter associated with the 9560 * trace array returned. This makes sure it cannot be freed while in use. 9561 * Use trace_array_put() once the trace array is no longer needed. 9562 * If the trace_array is to be freed, trace_array_destroy() needs to 9563 * be called after the trace_array_put(), or simply let user space delete 9564 * it from the tracefs instances directory. But until the 9565 * trace_array_put() is called, user space can not delete it. 9566 * 9567 */ 9568 struct trace_array *trace_array_get_by_name(const char *name) 9569 { 9570 struct trace_array *tr; 9571 9572 mutex_lock(&event_mutex); 9573 mutex_lock(&trace_types_lock); 9574 9575 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9576 if (tr->name && strcmp(tr->name, name) == 0) 9577 goto out_unlock; 9578 } 9579 9580 tr = trace_array_create(name); 9581 9582 if (IS_ERR(tr)) 9583 tr = NULL; 9584 out_unlock: 9585 if (tr) 9586 tr->ref++; 9587 9588 mutex_unlock(&trace_types_lock); 9589 mutex_unlock(&event_mutex); 9590 return tr; 9591 } 9592 EXPORT_SYMBOL_GPL(trace_array_get_by_name); 9593 9594 static int __remove_instance(struct trace_array *tr) 9595 { 9596 int i; 9597 9598 /* Reference counter for a newly created trace array = 1. 
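* Removal is therefore refused below (-EBUSY) if anything else still holds a reference (tr->ref > 1, e.g. a trace_array_get_by_name() user that has not yet called trace_array_put()) or if an open file is still using the ring buffers (tr->trace_ref, e.g. a reader of trace_pipe_raw).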
*/ 9599 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) 9600 return -EBUSY; 9601 9602 list_del(&tr->list); 9603 9604 /* Disable all the flags that were enabled coming in */ 9605 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { 9606 if ((1 << i) & ZEROED_TRACE_FLAGS) 9607 set_tracer_flag(tr, 1 << i, 0); 9608 } 9609 9610 tracing_set_nop(tr); 9611 clear_ftrace_function_probes(tr); 9612 event_trace_del_tracer(tr); 9613 ftrace_clear_pids(tr); 9614 ftrace_destroy_function_files(tr); 9615 tracefs_remove(tr->dir); 9616 free_percpu(tr->last_func_repeats); 9617 free_trace_buffers(tr); 9618 clear_tracing_err_log(tr); 9619 9620 for (i = 0; i < tr->nr_topts; i++) { 9621 kfree(tr->topts[i].topts); 9622 } 9623 kfree(tr->topts); 9624 9625 free_cpumask_var(tr->pipe_cpumask); 9626 free_cpumask_var(tr->tracing_cpumask); 9627 kfree(tr->name); 9628 kfree(tr); 9629 9630 return 0; 9631 } 9632 9633 int trace_array_destroy(struct trace_array *this_tr) 9634 { 9635 struct trace_array *tr; 9636 int ret; 9637 9638 if (!this_tr) 9639 return -EINVAL; 9640 9641 mutex_lock(&event_mutex); 9642 mutex_lock(&trace_types_lock); 9643 9644 ret = -ENODEV; 9645 9646 /* Making sure trace array exists before destroying it. */ 9647 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9648 if (tr == this_tr) { 9649 ret = __remove_instance(tr); 9650 break; 9651 } 9652 } 9653 9654 mutex_unlock(&trace_types_lock); 9655 mutex_unlock(&event_mutex); 9656 9657 return ret; 9658 } 9659 EXPORT_SYMBOL_GPL(trace_array_destroy); 9660 9661 static int instance_rmdir(const char *name) 9662 { 9663 struct trace_array *tr; 9664 int ret; 9665 9666 mutex_lock(&event_mutex); 9667 mutex_lock(&trace_types_lock); 9668 9669 ret = -ENODEV; 9670 tr = trace_array_find(name); 9671 if (tr) 9672 ret = __remove_instance(tr); 9673 9674 mutex_unlock(&trace_types_lock); 9675 mutex_unlock(&event_mutex); 9676 9677 return ret; 9678 } 9679 9680 static __init void create_trace_instances(struct dentry *d_tracer) 9681 { 9682 struct trace_array *tr; 9683 9684 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, 9685 instance_mkdir, 9686 instance_rmdir); 9687 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) 9688 return; 9689 9690 mutex_lock(&event_mutex); 9691 mutex_lock(&trace_types_lock); 9692 9693 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 9694 if (!tr->name) 9695 continue; 9696 if (MEM_FAIL(trace_array_create_dir(tr) < 0, 9697 "Failed to create instance directory\n")) 9698 break; 9699 } 9700 9701 mutex_unlock(&trace_types_lock); 9702 mutex_unlock(&event_mutex); 9703 } 9704 9705 static void 9706 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) 9707 { 9708 struct trace_event_file *file; 9709 int cpu; 9710 9711 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, 9712 tr, &show_traces_fops); 9713 9714 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, 9715 tr, &set_tracer_fops); 9716 9717 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, 9718 tr, &tracing_cpumask_fops); 9719 9720 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, 9721 tr, &tracing_iter_fops); 9722 9723 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, 9724 tr, &tracing_fops); 9725 9726 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, 9727 tr, &tracing_pipe_fops); 9728 9729 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, 9730 tr, &tracing_entries_fops); 9731 9732 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, 9733 tr, 
&tracing_total_entries_fops); 9734 9735 trace_create_file("free_buffer", 0200, d_tracer, 9736 tr, &tracing_free_buffer_fops); 9737 9738 trace_create_file("trace_marker", 0220, d_tracer, 9739 tr, &tracing_mark_fops); 9740 9741 file = __find_event_file(tr, "ftrace", "print"); 9742 if (file && file->dir) 9743 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, 9744 file, &event_trigger_fops); 9745 tr->trace_marker_file = file; 9746 9747 trace_create_file("trace_marker_raw", 0220, d_tracer, 9748 tr, &tracing_mark_raw_fops); 9749 9750 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, 9751 &trace_clock_fops); 9752 9753 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, 9754 tr, &rb_simple_fops); 9755 9756 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, 9757 &trace_time_stamp_mode_fops); 9758 9759 tr->buffer_percent = 50; 9760 9761 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, 9762 tr, &buffer_percent_fops); 9763 9764 create_trace_options_dir(tr); 9765 9766 #ifdef CONFIG_TRACER_MAX_TRACE 9767 trace_create_maxlat_file(tr, d_tracer); 9768 #endif 9769 9770 if (ftrace_create_function_files(tr, d_tracer)) 9771 MEM_FAIL(1, "Could not allocate function filter files"); 9772 9773 #ifdef CONFIG_TRACER_SNAPSHOT 9774 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, 9775 tr, &snapshot_fops); 9776 #endif 9777 9778 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, 9779 tr, &tracing_err_log_fops); 9780 9781 for_each_tracing_cpu(cpu) 9782 tracing_init_tracefs_percpu(tr, cpu); 9783 9784 ftrace_init_tracefs(tr, d_tracer); 9785 } 9786 9787 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore) 9788 { 9789 struct vfsmount *mnt; 9790 struct file_system_type *type; 9791 9792 /* 9793 * To maintain backward compatibility for tools that mount 9794 * debugfs to get to the tracing facility, tracefs is automatically 9795 * mounted to the debugfs/tracing directory. 9796 */ 9797 type = get_fs_type("tracefs"); 9798 if (!type) 9799 return NULL; 9800 mnt = vfs_submount(mntpt, type, "tracefs", NULL); 9801 put_filesystem(type); 9802 if (IS_ERR(mnt)) 9803 return NULL; 9804 mntget(mnt); 9805 9806 return mnt; 9807 } 9808 9809 /** 9810 * tracing_init_dentry - initialize top level trace array 9811 * 9812 * This is called when creating files or directories in the tracing 9813 * directory. It is called via fs_initcall() by any of the boot up code 9814 * and returns 0 once the top level tracing directory is available, or a negative error code otherwise. 9815 */ 9816 int tracing_init_dentry(void) 9817 { 9818 struct trace_array *tr = &global_trace; 9819 9820 if (security_locked_down(LOCKDOWN_TRACEFS)) { 9821 pr_warn("Tracing disabled due to lockdown\n"); 9822 return -EPERM; 9823 } 9824 9825 /* The top level trace array uses NULL as parent */ 9826 if (tr->dir) 9827 return 0; 9828 9829 if (WARN_ON(!tracefs_initialized())) 9830 return -ENODEV; 9831 9832 /* 9833 * As there may still be users that expect the tracing 9834 * files to exist in debugfs/tracing, we must automount 9835 * the tracefs file system there, so older tools still 9836 * work with the newer kernel.
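* For example, a script that still runs something like "cat /sys/kernel/debug/tracing/trace" keeps working: the first access to the debugfs "tracing" directory invokes trace_automount() above and tracefs is mounted there on demand.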
9837 */ 9838 tr->dir = debugfs_create_automount("tracing", NULL, 9839 trace_automount, NULL); 9840 9841 return 0; 9842 } 9843 9844 extern struct trace_eval_map *__start_ftrace_eval_maps[]; 9845 extern struct trace_eval_map *__stop_ftrace_eval_maps[]; 9846 9847 static struct workqueue_struct *eval_map_wq __initdata; 9848 static struct work_struct eval_map_work __initdata; 9849 static struct work_struct tracerfs_init_work __initdata; 9850 9851 static void __init eval_map_work_func(struct work_struct *work) 9852 { 9853 int len; 9854 9855 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; 9856 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); 9857 } 9858 9859 static int __init trace_eval_init(void) 9860 { 9861 INIT_WORK(&eval_map_work, eval_map_work_func); 9862 9863 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0); 9864 if (!eval_map_wq) { 9865 pr_err("Unable to allocate eval_map_wq\n"); 9866 /* Do work here */ 9867 eval_map_work_func(&eval_map_work); 9868 return -ENOMEM; 9869 } 9870 9871 queue_work(eval_map_wq, &eval_map_work); 9872 return 0; 9873 } 9874 9875 subsys_initcall(trace_eval_init); 9876 9877 static int __init trace_eval_sync(void) 9878 { 9879 /* Make sure the eval map updates are finished */ 9880 if (eval_map_wq) 9881 destroy_workqueue(eval_map_wq); 9882 return 0; 9883 } 9884 9885 late_initcall_sync(trace_eval_sync); 9886 9887 9888 #ifdef CONFIG_MODULES 9889 static void trace_module_add_evals(struct module *mod) 9890 { 9891 if (!mod->num_trace_evals) 9892 return; 9893 9894 /* 9895 * Modules with bad taint do not have events created, do 9896 * not bother with enums either. 9897 */ 9898 if (trace_module_has_bad_taint(mod)) 9899 return; 9900 9901 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); 9902 } 9903 9904 #ifdef CONFIG_TRACE_EVAL_MAP_FILE 9905 static void trace_module_remove_evals(struct module *mod) 9906 { 9907 union trace_eval_map_item *map; 9908 union trace_eval_map_item **last = &trace_eval_maps; 9909 9910 if (!mod->num_trace_evals) 9911 return; 9912 9913 mutex_lock(&trace_eval_mutex); 9914 9915 map = trace_eval_maps; 9916 9917 while (map) { 9918 if (map->head.mod == mod) 9919 break; 9920 map = trace_eval_jmp_to_tail(map); 9921 last = &map->tail.next; 9922 map = map->tail.next; 9923 } 9924 if (!map) 9925 goto out; 9926 9927 *last = trace_eval_jmp_to_tail(map)->tail.next; 9928 kfree(map); 9929 out: 9930 mutex_unlock(&trace_eval_mutex); 9931 } 9932 #else 9933 static inline void trace_module_remove_evals(struct module *mod) { } 9934 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ 9935 9936 static int trace_module_notify(struct notifier_block *self, 9937 unsigned long val, void *data) 9938 { 9939 struct module *mod = data; 9940 9941 switch (val) { 9942 case MODULE_STATE_COMING: 9943 trace_module_add_evals(mod); 9944 break; 9945 case MODULE_STATE_GOING: 9946 trace_module_remove_evals(mod); 9947 break; 9948 } 9949 9950 return NOTIFY_OK; 9951 } 9952 9953 static struct notifier_block trace_module_nb = { 9954 .notifier_call = trace_module_notify, 9955 .priority = 0, 9956 }; 9957 #endif /* CONFIG_MODULES */ 9958 9959 static __init void tracer_init_tracefs_work_func(struct work_struct *work) 9960 { 9961 9962 event_trace_init(); 9963 9964 init_tracer_tracefs(&global_trace, NULL); 9965 ftrace_init_tracefs_toplevel(&global_trace, NULL); 9966 9967 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, 9968 &global_trace, &tracing_thresh_fops); 9969 9970 trace_create_file("README", TRACE_MODE_READ, NULL, 9971 NULL, &tracing_readme_fops); 9972 9973 
trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, 9974 NULL, &tracing_saved_cmdlines_fops); 9975 9976 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, 9977 NULL, &tracing_saved_cmdlines_size_fops); 9978 9979 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, 9980 NULL, &tracing_saved_tgids_fops); 9981 9982 trace_create_eval_file(NULL); 9983 9984 #ifdef CONFIG_MODULES 9985 register_module_notifier(&trace_module_nb); 9986 #endif 9987 9988 #ifdef CONFIG_DYNAMIC_FTRACE 9989 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, 9990 NULL, &tracing_dyn_info_fops); 9991 #endif 9992 9993 create_trace_instances(NULL); 9994 9995 update_tracer_options(&global_trace); 9996 } 9997 9998 static __init int tracer_init_tracefs(void) 9999 { 10000 int ret; 10001 10002 trace_access_lock_init(); 10003 10004 ret = tracing_init_dentry(); 10005 if (ret) 10006 return 0; 10007 10008 if (eval_map_wq) { 10009 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func); 10010 queue_work(eval_map_wq, &tracerfs_init_work); 10011 } else { 10012 tracer_init_tracefs_work_func(NULL); 10013 } 10014 10015 rv_init_interface(); 10016 10017 return 0; 10018 } 10019 10020 fs_initcall(tracer_init_tracefs); 10021 10022 static int trace_die_panic_handler(struct notifier_block *self, 10023 unsigned long ev, void *unused); 10024 10025 static struct notifier_block trace_panic_notifier = { 10026 .notifier_call = trace_die_panic_handler, 10027 .priority = INT_MAX - 1, 10028 }; 10029 10030 static struct notifier_block trace_die_notifier = { 10031 .notifier_call = trace_die_panic_handler, 10032 .priority = INT_MAX - 1, 10033 }; 10034 10035 /* 10036 * The idea is to execute the following die/panic callback early, in order 10037 * to avoid showing irrelevant information in the trace (like other panic 10038 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall 10039 * warnings get disabled (to prevent potential log flooding). 10040 */ 10041 static int trace_die_panic_handler(struct notifier_block *self, 10042 unsigned long ev, void *unused) 10043 { 10044 if (!ftrace_dump_on_oops) 10045 return NOTIFY_DONE; 10046 10047 /* The die notifier requires DIE_OOPS to trigger */ 10048 if (self == &trace_die_notifier && ev != DIE_OOPS) 10049 return NOTIFY_DONE; 10050 10051 ftrace_dump(ftrace_dump_on_oops); 10052 10053 return NOTIFY_DONE; 10054 } 10055 10056 /* 10057 * printk is set to max of 1024, we really don't need it that big. 10058 * Nothing should be printing 1000 characters anyway. 10059 */ 10060 #define TRACE_MAX_PRINT 1000 10061 10062 /* 10063 * Define here KERN_TRACE so that we have one place to modify 10064 * it if we decide to change what log level the ftrace dump 10065 * should be at. 10066 */ 10067 #define KERN_TRACE KERN_EMERG 10068 10069 void 10070 trace_printk_seq(struct trace_seq *s) 10071 { 10072 /* Probably should print a warning here. */ 10073 if (s->seq.len >= TRACE_MAX_PRINT) 10074 s->seq.len = TRACE_MAX_PRINT; 10075 10076 /* 10077 * More paranoid code. Although the buffer size is set to 10078 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just 10079 * an extra layer of protection. 10080 */ 10081 if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) 10082 s->seq.len = s->seq.size - 1; 10083 10084 /* should be zero ended, but we are paranoid. 
*/ 10085 s->buffer[s->seq.len] = 0; 10086 10087 printk(KERN_TRACE "%s", s->buffer); 10088 10089 trace_seq_init(s); 10090 } 10091 10092 void trace_init_global_iter(struct trace_iterator *iter) 10093 { 10094 iter->tr = &global_trace; 10095 iter->trace = iter->tr->current_trace; 10096 iter->cpu_file = RING_BUFFER_ALL_CPUS; 10097 iter->array_buffer = &global_trace.array_buffer; 10098 10099 if (iter->trace && iter->trace->open) 10100 iter->trace->open(iter); 10101 10102 /* Annotate start of buffers if we had overruns */ 10103 if (ring_buffer_overruns(iter->array_buffer->buffer)) 10104 iter->iter_flags |= TRACE_FILE_ANNOTATE; 10105 10106 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 10107 if (trace_clocks[iter->tr->clock_id].in_ns) 10108 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 10109 10110 /* Can not use kmalloc for iter.temp and iter.fmt */ 10111 iter->temp = static_temp_buf; 10112 iter->temp_size = STATIC_TEMP_BUF_SIZE; 10113 iter->fmt = static_fmt_buf; 10114 iter->fmt_size = STATIC_FMT_BUF_SIZE; 10115 } 10116 10117 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) 10118 { 10119 /* use static because iter can be a bit big for the stack */ 10120 static struct trace_iterator iter; 10121 static atomic_t dump_running; 10122 struct trace_array *tr = &global_trace; 10123 unsigned int old_userobj; 10124 unsigned long flags; 10125 int cnt = 0, cpu; 10126 10127 /* Only allow one dump user at a time. */ 10128 if (atomic_inc_return(&dump_running) != 1) { 10129 atomic_dec(&dump_running); 10130 return; 10131 } 10132 10133 /* 10134 * Always turn off tracing when we dump. 10135 * We don't need to show trace output of what happens 10136 * between multiple crashes. 10137 * 10138 * If the user does a sysrq-z, then they can re-enable 10139 * tracing with echo 1 > tracing_on. 10140 */ 10141 tracing_off(); 10142 10143 local_irq_save(flags); 10144 10145 /* Simulate the iterator */ 10146 trace_init_global_iter(&iter); 10147 10148 for_each_tracing_cpu(cpu) { 10149 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); 10150 } 10151 10152 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; 10153 10154 /* don't look at user memory in panic mode */ 10155 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 10156 10157 switch (oops_dump_mode) { 10158 case DUMP_ALL: 10159 iter.cpu_file = RING_BUFFER_ALL_CPUS; 10160 break; 10161 case DUMP_ORIG: 10162 iter.cpu_file = raw_smp_processor_id(); 10163 break; 10164 case DUMP_NONE: 10165 goto out_enable; 10166 default: 10167 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); 10168 iter.cpu_file = RING_BUFFER_ALL_CPUS; 10169 } 10170 10171 printk(KERN_TRACE "Dumping ftrace buffer:\n"); 10172 10173 /* Did function tracer already get disabled? */ 10174 if (ftrace_is_dead()) { 10175 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); 10176 printk("# MAY BE MISSING FUNCTION EVENTS\n"); 10177 } 10178 10179 /* 10180 * We need to stop all tracing on all CPUS to read 10181 * the next buffer. This is a bit expensive, but is 10182 * not done often. We fill all what we can read, 10183 * and then release the locks again. 
10184 */ 10185 10186 while (!trace_empty(&iter)) { 10187 10188 if (!cnt) 10189 printk(KERN_TRACE "---------------------------------\n"); 10190 10191 cnt++; 10192 10193 trace_iterator_reset(&iter); 10194 iter.iter_flags |= TRACE_FILE_LAT_FMT; 10195 10196 if (trace_find_next_entry_inc(&iter) != NULL) { 10197 int ret; 10198 10199 ret = print_trace_line(&iter); 10200 if (ret != TRACE_TYPE_NO_CONSUME) 10201 trace_consume(&iter); 10202 } 10203 touch_nmi_watchdog(); 10204 10205 trace_printk_seq(&iter.seq); 10206 } 10207 10208 if (!cnt) 10209 printk(KERN_TRACE " (ftrace buffer empty)\n"); 10210 else 10211 printk(KERN_TRACE "---------------------------------\n"); 10212 10213 out_enable: 10214 tr->trace_flags |= old_userobj; 10215 10216 for_each_tracing_cpu(cpu) { 10217 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); 10218 } 10219 atomic_dec(&dump_running); 10220 local_irq_restore(flags); 10221 } 10222 EXPORT_SYMBOL_GPL(ftrace_dump); 10223 10224 #define WRITE_BUFSIZE 4096 10225 10226 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, 10227 size_t count, loff_t *ppos, 10228 int (*createfn)(const char *)) 10229 { 10230 char *kbuf, *buf, *tmp; 10231 int ret = 0; 10232 size_t done = 0; 10233 size_t size; 10234 10235 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); 10236 if (!kbuf) 10237 return -ENOMEM; 10238 10239 while (done < count) { 10240 size = count - done; 10241 10242 if (size >= WRITE_BUFSIZE) 10243 size = WRITE_BUFSIZE - 1; 10244 10245 if (copy_from_user(kbuf, buffer + done, size)) { 10246 ret = -EFAULT; 10247 goto out; 10248 } 10249 kbuf[size] = '\0'; 10250 buf = kbuf; 10251 do { 10252 tmp = strchr(buf, '\n'); 10253 if (tmp) { 10254 *tmp = '\0'; 10255 size = tmp - buf + 1; 10256 } else { 10257 size = strlen(buf); 10258 if (done + size < count) { 10259 if (buf != kbuf) 10260 break; 10261 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ 10262 pr_warn("Line length is too long: Should be less than %d\n", 10263 WRITE_BUFSIZE - 2); 10264 ret = -EINVAL; 10265 goto out; 10266 } 10267 } 10268 done += size; 10269 10270 /* Remove comments */ 10271 tmp = strchr(buf, '#'); 10272 10273 if (tmp) 10274 *tmp = '\0'; 10275 10276 ret = createfn(buf); 10277 if (ret) 10278 goto out; 10279 buf += size; 10280 10281 } while (done < count); 10282 } 10283 ret = done; 10284 10285 out: 10286 kfree(kbuf); 10287 10288 return ret; 10289 } 10290 10291 #ifdef CONFIG_TRACER_MAX_TRACE 10292 __init static bool tr_needs_alloc_snapshot(const char *name) 10293 { 10294 char *test; 10295 int len = strlen(name); 10296 bool ret; 10297 10298 if (!boot_snapshot_index) 10299 return false; 10300 10301 if (strncmp(name, boot_snapshot_info, len) == 0 && 10302 boot_snapshot_info[len] == '\t') 10303 return true; 10304 10305 test = kmalloc(strlen(name) + 3, GFP_KERNEL); 10306 if (!test) 10307 return false; 10308 10309 sprintf(test, "\t%s\t", name); 10310 ret = strstr(boot_snapshot_info, test) == NULL; 10311 kfree(test); 10312 return ret; 10313 } 10314 10315 __init static void do_allocate_snapshot(const char *name) 10316 { 10317 if (!tr_needs_alloc_snapshot(name)) 10318 return; 10319 10320 /* 10321 * When allocate_snapshot is set, the next call to 10322 * allocate_trace_buffers() (called by trace_array_get_by_name()) 10323 * will allocate the snapshot buffer. That will also clear 10324 * this flag.
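* * (boot_snapshot_info checked by tr_needs_alloc_snapshot() above is built from the "ftrace_boot_snapshot" kernel command line parameter earlier in this file; enable_instances() below runs each instance name given by the "trace_instance=" parameter through do_allocate_snapshot().)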
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif

/*
 * Create the ring buffer instances requested on the kernel command line
 * (stored in boot_instance_info) and enable any events listed for each
 * instance.
 */
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}

__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is stored in an init section.
	 * This function is called from a late_initcall. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       " \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);