// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

static LIST_HEAD(module_strings);

struct module_string {
	struct list_head	next;
	struct module		*module;
	char			*str;
};

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type, int len)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->len = len;

	list_add(&field->link, head);

	return 0;
}
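
/*
 * Illustrative sketch (not part of this file): a dynamic event's
 * define_fields() callback could register an "unsigned long ip"
 * member of a hypothetical entry layout with the helper below:
 *
 *	struct my_entry { struct trace_entry ent; unsigned long ip; };
 *
 *	ret = trace_define_field(call, "unsigned long", "ip",
 *				 offsetof(struct my_entry, ip),
 *				 sizeof(unsigned long),
 *				 is_signed_type(unsigned long), FILTER_OTHER);
 *
 * Passing FILTER_OTHER lets __trace_define_field() derive the real
 * filter type from the type string via filter_assign_type().
 */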

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);

static int trace_define_field_ext(struct trace_event_call *call, const char *type,
				  const char *name, int offset, int size,
				  int is_signed, int filter_type, int len)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type, len);
}

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type, 0);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER, 0);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(int, common_cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);
	__generic_field(char *, stacktrace, FILTER_STACKTRACE);
	__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	/* Holds both preempt_count and migrate_disable */
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}
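
/*
 * Examples (illustrative) of what the checks below accept or flag:
 *
 *	print fmt: "mac=%pM", REC->addr
 *	    OK when "addr" is an array embedded in the event.
 *	print fmt: "mac=%pM", REC->addr_ptr
 *	    flagged: the pointer may be stale by the time the event
 *	    is read from the ring buffer.
 *	print fmt: "ip=%pI4", __get_dynamic_array(ip)
 *	    OK: the data lives in the event's dynamic array area.
 */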

/*
 * Check if the referenced field is an array and return true,
 * as arrays are OK to dereference.
 */
static bool test_field(const char *fmt, struct trace_event_call *call)
{
	struct trace_event_fields *field = call->class->fields_array;
	const char *array_descriptor;
	const char *p = fmt;
	int len;

	if (!(len = str_has_prefix(fmt, "REC->")))
		return false;
	fmt += len;
	for (p = fmt; *p; p++) {
		if (!isalnum(*p) && *p != '_')
			break;
	}
	len = p - fmt;

	for (; field->type; field++) {
		if (strncmp(field->name, fmt, len) ||
		    field->name[len])
			continue;
		array_descriptor = strchr(field->type, '[');
		/* This is an array and is OK to dereference. */
		return array_descriptor != NULL;
	}
	return false;
}

/*
 * Examine the print fmt of the event looking for unsafe dereference
 * pointers using %p* that could be recorded in the trace event and
 * much later referenced after the pointer was freed. Dereferencing
 * pointers is OK, if the pointer is dereferenced into the event
 * itself.
 */
static void test_event_printk(struct trace_event_call *call)
{
	u64 dereference_flags = 0;
	bool first = true;
	const char *fmt, *c, *r, *a;
	int parens = 0;
	char in_quote = 0;
	int start_arg = 0;
	int arg = 0;
	int i;

	fmt = call->print_fmt;

	if (!fmt)
		return;

	for (i = 0; fmt[i]; i++) {
		switch (fmt[i]) {
		case '\\':
			i++;
			if (!fmt[i])
				return;
			continue;
		case '"':
		case '\'':
			/*
			 * The print fmt starts with a string that
			 * is processed first to find %p* usage,
			 * then after the first string, the print fmt
			 * contains arguments that are used to check
			 * if the dereferenced %p* usage is safe.
			 */
			if (first) {
				if (fmt[i] == '\'')
					continue;
				if (in_quote) {
					arg = 0;
					first = false;
					/*
					 * If there were no %p* uses,
					 * the fmt is OK.
					 */
					if (!dereference_flags)
						return;
				}
			}
			if (in_quote) {
				if (in_quote == fmt[i])
					in_quote = 0;
			} else {
				in_quote = fmt[i];
			}
			continue;
		case '%':
			if (!first || !in_quote)
				continue;
			i++;
			if (!fmt[i])
				return;
			switch (fmt[i]) {
			case '%':
				continue;
			case 'p':
				/* Find dereferencing fields */
				switch (fmt[i + 1]) {
				case 'B': case 'R': case 'r':
				case 'b': case 'M': case 'm':
				case 'I': case 'i': case 'E':
				case 'U': case 'V': case 'N':
				case 'a': case 'd': case 'D':
				case 'g': case 't': case 'C':
				case 'O': case 'f':
					if (WARN_ONCE(arg == 63,
						      "Too many args for event: %s",
						      trace_event_name(call)))
						return;
					dereference_flags |= 1ULL << arg;
				}
				break;
			default:
			{
				bool star = false;
				int j;

				/* Increment arg if %*s exists. */
				for (j = 0; fmt[i + j]; j++) {
					if (isdigit(fmt[i + j]) ||
					    fmt[i + j] == '.')
						continue;
					if (fmt[i + j] == '*') {
						star = true;
						continue;
					}
					if ((fmt[i + j] == 's') && star)
						arg++;
					break;
				}
				break;
			} /* default */

			} /* switch */
			arg++;
			continue;
		case '(':
			if (in_quote)
				continue;
			parens++;
			continue;
		case ')':
			if (in_quote)
				continue;
			parens--;
			if (WARN_ONCE(parens < 0,
				      "Paren mismatch for event: %s\narg='%s'\n%*s",
				      trace_event_name(call),
				      fmt + start_arg,
				      (i - start_arg) + 5, "^"))
				return;
			continue;
		case ',':
			if (in_quote || parens)
				continue;
			i++;
			while (isspace(fmt[i]))
				i++;
			start_arg = i;
			if (!(dereference_flags & (1ULL << arg)))
				goto next_arg;

			/* Find the REC-> in the argument */
			c = strchr(fmt + i, ',');
			r = strstr(fmt + i, "REC->");
			if (r && (!c || r < c)) {
				/*
				 * Addresses of events on the buffer,
				 * or an array on the buffer is
				 * OK to dereference.
				 * There are ways to fool this, but
				 * this is to catch common mistakes,
				 * not malicious code.
				 */
				a = strchr(fmt + i, '&');
				if ((a && (a < r)) || test_field(r, call))
					dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			} else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
				   (!c || r < c)) {
				dereference_flags &= ~(1ULL << arg);
			}

		next_arg:
			i--;
			arg++;
		}
	}
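
	/*
	 * Any bit still set in dereference_flags marks a %p* argument
	 * whose data could not be shown to live inside the event
	 * itself, which triggers the warning below.
	 */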
	/*
	 * If you triggered the warning below, the reported trace event
	 * uses an unsafe %p* dereference. As the data stored at the
	 * time the trace event fired may no longer exist when the trace
	 * event is printed, dereferencing the original source is
	 * unsafe. The source of the dereference must be copied into the
	 * event itself, and the dereference must access the copy instead.
	 */
	if (WARN_ON_ONCE(dereference_flags)) {
		arg = 1;
		while (!(dereference_flags & 1)) {
			dereference_flags >>= 1;
			arg++;
		}
		pr_warn("event %s has unsafe dereference of argument %d\n",
			trace_event_name(call), arg);
		pr_warn("print_fmt: %s\n", fmt);
	}
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	test_event_printk(call);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	fbuffer->trace_ctx = tracing_gen_ctx_dec();
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->trace_ctx);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
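
/*
 * Sketch (illustrative, mirroring what TRACE_EVENT() generated probes
 * do) of using the reserve/commit pair; "my_entry" and "ip" are
 * placeholders:
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->ip = ip;
 *	trace_event_buffer_commit(&fbuffer);
 */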

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}
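
/*
 * The helper below implements both the hard enable/disable used by
 * the "enable" files and the "soft" mode used by triggers: in
 * SOFT_MODE the tracepoint stays registered while SOFT_DISABLED makes
 * the event behave as if it were off. Illustrative calls:
 *
 *	__ftrace_event_enable_disable(file, 1, 0);  // "echo 1 > enable"
 *	__ftrace_event_enable_disable(file, 1, 1);  // soft enable (trigger)
 */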

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Disable use of trace_buffered_event */
			trace_buffered_event_disable();
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
			/* Enable use of trace_buffered_event */
			trace_buffered_event_enable();
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event %s\n",
					trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
					struct task_struct *prev,
					struct task_struct *next,
					unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
	      trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
					 struct task_struct *prev,
					 struct task_struct *next,
					 unsigned int prev_state)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}
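
/*
 * The teardown below follows the usual RCU pattern: publish NULL,
 * let every probe finish via tracepoint_synchronize_unregister(), and
 * only then free the old pid lists.
 */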

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_pid_list_free(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_pid_list_free(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		eventfs_remove_dir(dir->ei);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

void event_file_get(struct trace_event_file *file)
{
	atomic_inc(&file->ref);
}

void event_file_put(struct trace_event_file *file)
{
	if (WARN_ON_ONCE(!atomic_read(&file->ref))) {
		if (file->flags & EVENT_FILE_FL_FREED)
			kmem_cache_free(file_cachep, file);
		return;
	}

	if (atomic_dec_and_test(&file->ref)) {
		/* Count should only go to zero when it is freed */
		if (WARN_ON_ONCE(!(file->flags & EVENT_FILE_FL_FREED)))
			return;
		kmem_cache_free(file_cachep, file);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	eventfs_remove_dir(file->ei);
	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	file->flags |= EVENT_FILE_FL_FREED;
	event_file_put(file);
}
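
/*
 * Lifetime note: remove_event_file_dir() above sets
 * EVENT_FILE_FL_FREED before dropping its reference, and readers
 * under event_mutex test that flag, so a file on its way out is
 * never re-enabled or reused.
 */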

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
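
/*
 * Example (illustrative) of built-in kernel code driving the helper
 * above:
 *
 *	trace_set_clr_event("sched", NULL, 1);		 // all sched events on
 *	trace_set_clr_event("sched", "sched_switch", 0); // one event off
 */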

/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
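
/*
 * Example (illustrative) writes handled by ftrace_event_write()
 * above, via the "set_event" file:
 *
 *	# echo sched:sched_switch >> /sys/kernel/tracing/set_event
 *	# echo '!sched:*' >> /sys/kernel/tracing/set_event
 *
 * A leading '!' clears matching events instead of setting them.
 */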

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}
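
/*
 * event_enable_read() below reports one of four strings: "0"
 * (disabled), "1" (enabled), with a '*' suffix ("0*"/"1*") when the
 * event is in SOFT_MODE or SOFT_DISABLED, e.g. held by a trigger.
 */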

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file || flags & EVENT_FILE_FL_FREED)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file && !(file->flags & EVENT_FILE_FL_FREED)))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
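
/*
 * system_enable_read() below folds the state of every event in the
 * subsystem into two bits: bit 0 means at least one event is
 * disabled, bit 1 means at least one is enabled. set_to_char[] then
 * maps the result to '?' (no events), '0', '1' or 'X' (mixed).
 */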

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}
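
/*
 * Example "format" file output assembled by f_show() below (abridged
 * and illustrative; IDs and offsets vary by build):
 *
 *	name: sched_switch
 *	ID: 316
 *	format:
 *		field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 *
 *		field:char prev_comm[16];	offset:8;	size:16;	signed:0;
 *
 *	print fmt: "prev_comm=%s ...", REC->prev_comm
 */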

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else if (field->len)
		seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->len, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   field->offset, field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	/* Do we want to hide event format files on tracefs lockdown? */

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
#endif

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file && !(file->flags & EVENT_FILE_FL_FREED))
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}
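
/*
 * Example (illustrative) of a filter string that reaches
 * apply_event_filter() via the write below:
 *
 *	# echo 'prev_pid != 0 && next_comm ~ "kworker*"' > \
 *		events/sched/sched_switch/filter
 */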

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir = NULL, *iter_dir;
	struct trace_array *tr = NULL, *iter_tr;
	struct event_subsystem *system = NULL;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(iter_dir, &iter_tr->systems, list) {
			if (iter_dir == inode->i_private) {
				/* Don't open systems with no events */
				tr = iter_tr;
				dir = iter_dir;
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	ret = tracing_open_generic_tr(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}
	dir->tr = tr;
	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
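
/*
 * Example (illustrative): a filter written at the subsystem level is
 * applied to every event in that subsystem:
 *
 *	# echo 'common_pid != 0' > events/sched/filter
 */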

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
						mutex_is_locked(&event_mutex));

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void register_pid_events(struct trace_array *tr)
{
	/*
	 * Register a probe that is called before all other probes
	 * to set ignore_pid if next or prev do not match.
	 * Register a probe that is called after all other probes
	 * to only keep ignore_pid set if next pid matches.
	 */
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
					     tr, INT_MAX);
	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
					     tr, 0);

	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);
}
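
/*
 * Example (illustrative) of the interface event_pid_write() below
 * implements:
 *
 *	# echo 1234 > /sys/kernel/tracing/set_event_pid	  (trace only 1234)
 *	# echo 5678 >> /sys/kernel/tracing/set_event_pid  (append a pid)
 *	# echo > /sys/kernel/tracing/set_event_pid	  (clear the list)
 */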

static ssize_t
event_pid_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos, int type)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *other_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	if (type == TRACE_PIDS) {
		filtered_pids = rcu_dereference_protected(tr->filtered_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_no_pids,
						       lockdep_is_held(&event_mutex));
	} else {
		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_pids,
						       lockdep_is_held(&event_mutex));
	}

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	if (type == TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, pid_list);
	else
		rcu_assign_pointer(tr->filtered_no_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		tracepoint_synchronize_unregister();
		trace_pid_list_free(filtered_pids);
	} else if (pid_list && !other_pids) {
		register_pid_events(tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct seq_operations show_set_no_pid_seq_ops = {
	.start = np_start,
	.next = np_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_pid_fops = {
	.open = ftrace_event_set_pid_open,
	.read = seq_read,
	.write = ftrace_event_pid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_set_event_notrace_pid_fops = {
	.open = ftrace_event_set_npid_open,
	.read = seq_read,
	.write = ftrace_event_npid_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_file_tr,
	.read = event_enable_read,
	.write = event_enable_write,
	.release = tracing_release_file_tr,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_PERF_EVENTS
static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};
#endif

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_file_tr,
	.read = event_filter_read,
	.write = event_filter_write,
	.release = tracing_release_file_tr,
	.llseek = default_llseek,
};
ftrace_subsystem_filter_fops = { 2147 .open = subsystem_open, 2148 .read = subsystem_filter_read, 2149 .write = subsystem_filter_write, 2150 .llseek = default_llseek, 2151 .release = subsystem_release, 2152 }; 2153 2154 static const struct file_operations ftrace_system_enable_fops = { 2155 .open = subsystem_open, 2156 .read = system_enable_read, 2157 .write = system_enable_write, 2158 .llseek = default_llseek, 2159 .release = subsystem_release, 2160 }; 2161 2162 static const struct file_operations ftrace_tr_enable_fops = { 2163 .open = system_tr_open, 2164 .read = system_enable_read, 2165 .write = system_enable_write, 2166 .llseek = default_llseek, 2167 .release = subsystem_release, 2168 }; 2169 2170 static const struct file_operations ftrace_show_header_fops = { 2171 .open = tracing_open_generic, 2172 .read = show_header, 2173 .llseek = default_llseek, 2174 }; 2175 2176 static int 2177 ftrace_event_open(struct inode *inode, struct file *file, 2178 const struct seq_operations *seq_ops) 2179 { 2180 struct seq_file *m; 2181 int ret; 2182 2183 ret = security_locked_down(LOCKDOWN_TRACEFS); 2184 if (ret) 2185 return ret; 2186 2187 ret = seq_open(file, seq_ops); 2188 if (ret < 0) 2189 return ret; 2190 m = file->private_data; 2191 /* copy tr over to seq ops */ 2192 m->private = inode->i_private; 2193 2194 return ret; 2195 } 2196 2197 static int ftrace_event_release(struct inode *inode, struct file *file) 2198 { 2199 struct trace_array *tr = inode->i_private; 2200 2201 trace_array_put(tr); 2202 2203 return seq_release(inode, file); 2204 } 2205 2206 static int 2207 ftrace_event_avail_open(struct inode *inode, struct file *file) 2208 { 2209 const struct seq_operations *seq_ops = &show_event_seq_ops; 2210 2211 /* Checks for tracefs lockdown */ 2212 return ftrace_event_open(inode, file, seq_ops); 2213 } 2214 2215 static int 2216 ftrace_event_set_open(struct inode *inode, struct file *file) 2217 { 2218 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 2219 struct trace_array *tr = inode->i_private; 2220 int ret; 2221 2222 ret = tracing_check_open_get_tr(tr); 2223 if (ret) 2224 return ret; 2225 2226 if ((file->f_mode & FMODE_WRITE) && 2227 (file->f_flags & O_TRUNC)) 2228 ftrace_clear_events(tr); 2229 2230 ret = ftrace_event_open(inode, file, seq_ops); 2231 if (ret < 0) 2232 trace_array_put(tr); 2233 return ret; 2234 } 2235 2236 static int 2237 ftrace_event_set_pid_open(struct inode *inode, struct file *file) 2238 { 2239 const struct seq_operations *seq_ops = &show_set_pid_seq_ops; 2240 struct trace_array *tr = inode->i_private; 2241 int ret; 2242 2243 ret = tracing_check_open_get_tr(tr); 2244 if (ret) 2245 return ret; 2246 2247 if ((file->f_mode & FMODE_WRITE) && 2248 (file->f_flags & O_TRUNC)) 2249 ftrace_clear_event_pids(tr, TRACE_PIDS); 2250 2251 ret = ftrace_event_open(inode, file, seq_ops); 2252 if (ret < 0) 2253 trace_array_put(tr); 2254 return ret; 2255 } 2256 2257 static int 2258 ftrace_event_set_npid_open(struct inode *inode, struct file *file) 2259 { 2260 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops; 2261 struct trace_array *tr = inode->i_private; 2262 int ret; 2263 2264 ret = tracing_check_open_get_tr(tr); 2265 if (ret) 2266 return ret; 2267 2268 if ((file->f_mode & FMODE_WRITE) && 2269 (file->f_flags & O_TRUNC)) 2270 ftrace_clear_event_pids(tr, TRACE_NO_PIDS); 2271 2272 ret = ftrace_event_open(inode, file, seq_ops); 2273 if (ret < 0) 2274 trace_array_put(tr); 2275 return ret; 2276 } 2277 2278 static struct event_subsystem * 2279 create_new_subsystem(const 
char *name)
2280 {
2281 struct event_subsystem *system;
2282
2283 /* need to create new entry */
2284 system = kmalloc(sizeof(*system), GFP_KERNEL);
2285 if (!system)
2286 return NULL;
2287
2288 system->ref_count = 1;
2289
2290 /* Only allocate if dynamic (kprobes and modules) */
2291 system->name = kstrdup_const(name, GFP_KERNEL);
2292 if (!system->name)
2293 goto out_free;
2294
2295 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2296 if (!system->filter)
2297 goto out_free;
2298
2299 list_add(&system->list, &event_subsystems);
2300
2301 return system;
2302
2303 out_free:
2304 kfree_const(system->name);
2305 kfree(system);
2306 return NULL;
2307 }
2308
2309 static int system_callback(const char *name, umode_t *mode, void **data,
2310 const struct file_operations **fops)
2311 {
2312 if (strcmp(name, "filter") == 0)
2313 *fops = &ftrace_subsystem_filter_fops;
2314
2315 else if (strcmp(name, "enable") == 0)
2316 *fops = &ftrace_system_enable_fops;
2317
2318 else
2319 return 0;
2320
2321 *mode = TRACE_MODE_WRITE;
2322 return 1;
2323 }
2324
2325 static struct eventfs_inode *
2326 event_subsystem_dir(struct trace_array *tr, const char *name,
2327 struct trace_event_file *file, struct eventfs_inode *parent)
2328 {
2329 struct event_subsystem *system, *iter;
2330 struct trace_subsystem_dir *dir;
2331 struct eventfs_inode *ei;
2332 int nr_entries;
2333 static struct eventfs_entry system_entries[] = {
2334 {
2335 .name = "filter",
2336 .callback = system_callback,
2337 },
2338 {
2339 .name = "enable",
2340 .callback = system_callback,
2341 }
2342 };
2343
2344 /* First see if we already created this dir */
2345 list_for_each_entry(dir, &tr->systems, list) {
2346 system = dir->subsystem;
2347 if (strcmp(system->name, name) == 0) {
2348 dir->nr_events++;
2349 file->system = dir;
2350 return dir->ei;
2351 }
2352 }
2353
2354 /* Now see if the system itself exists. */
2355 system = NULL;
2356 list_for_each_entry(iter, &event_subsystems, list) {
2357 if (strcmp(iter->name, name) == 0) {
2358 system = iter;
2359 break;
2360 }
2361 }
2362
2363 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2364 if (!dir)
2365 goto out_fail;
2366
2367 if (!system) {
2368 system = create_new_subsystem(name);
2369 if (!system)
2370 goto out_free;
2371 } else
2372 __get_system(system);
2373
2374 /* ftrace only has directories, no files */
2375 if (strcmp(name, "ftrace") == 0)
2376 nr_entries = 0;
2377 else
2378 nr_entries = ARRAY_SIZE(system_entries);
2379
2380 ei = eventfs_create_dir(name, parent, system_entries, nr_entries, dir);
2381 if (IS_ERR(ei)) {
2382 pr_warn("Failed to create system directory %s\n", name);
2383 __put_system(system);
2384 goto out_free;
2385 }
2386
2387 dir->ei = ei;
2388 dir->tr = tr;
2389 dir->ref_count = 1;
2390 dir->nr_events = 1;
2391 dir->subsystem = system;
2392 file->system = dir;
2393
2394 list_add(&dir->list, &tr->systems);
2395
2396 return dir->ei;
2397
2398 out_free:
2399 kfree(dir);
2400 out_fail:
2401 /* Only print this message if memory allocation failed */
2402 if (!dir || !system)
2403 pr_warn("No memory to create event subsystem %s\n", name);
2404 return NULL;
2405 }
2406
2407 static int
2408 event_define_fields(struct trace_event_call *call)
2409 {
2410 struct list_head *head;
2411 int ret = 0;
2412
2413 /*
2414 * Other events may have the same class. Only update
2415 * the fields if they are not already defined.
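 * (When the fields are defined below, each one is placed at
 * ALIGN(offset, field->align) past the common trace_entry header. As an
 * illustrative layout, not one from a real event: a u16 followed by a
 * u64 leaves a 6-byte hole so that the u64 lands on an 8-byte boundary.)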
2416 */ 2417 head = trace_get_fields(call); 2418 if (list_empty(head)) { 2419 struct trace_event_fields *field = call->class->fields_array; 2420 unsigned int offset = sizeof(struct trace_entry); 2421 2422 for (; field->type; field++) { 2423 if (field->type == TRACE_FUNCTION_TYPE) { 2424 field->define_fields(call); 2425 break; 2426 } 2427 2428 offset = ALIGN(offset, field->align); 2429 ret = trace_define_field_ext(call, field->type, field->name, 2430 offset, field->size, 2431 field->is_signed, field->filter_type, 2432 field->len); 2433 if (WARN_ON_ONCE(ret)) { 2434 pr_err("error code is %d\n", ret); 2435 break; 2436 } 2437 2438 offset += field->size; 2439 } 2440 } 2441 2442 return ret; 2443 } 2444 2445 static int event_callback(const char *name, umode_t *mode, void **data, 2446 const struct file_operations **fops) 2447 { 2448 struct trace_event_file *file = *data; 2449 struct trace_event_call *call = file->event_call; 2450 2451 if (strcmp(name, "format") == 0) { 2452 *mode = TRACE_MODE_READ; 2453 *fops = &ftrace_event_format_fops; 2454 *data = call; 2455 return 1; 2456 } 2457 2458 /* 2459 * Only event directories that can be enabled should have 2460 * triggers or filters, with the exception of the "print" 2461 * event that can have a "trigger" file. 2462 */ 2463 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { 2464 if (call->class->reg && strcmp(name, "enable") == 0) { 2465 *mode = TRACE_MODE_WRITE; 2466 *fops = &ftrace_enable_fops; 2467 return 1; 2468 } 2469 2470 if (strcmp(name, "filter") == 0) { 2471 *mode = TRACE_MODE_WRITE; 2472 *fops = &ftrace_event_filter_fops; 2473 return 1; 2474 } 2475 } 2476 2477 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || 2478 strcmp(trace_event_name(call), "print") == 0) { 2479 if (strcmp(name, "trigger") == 0) { 2480 *mode = TRACE_MODE_WRITE; 2481 *fops = &event_trigger_fops; 2482 return 1; 2483 } 2484 } 2485 2486 #ifdef CONFIG_PERF_EVENTS 2487 if (call->event.type && call->class->reg && 2488 strcmp(name, "id") == 0) { 2489 *mode = TRACE_MODE_READ; 2490 *data = (void *)(long)call->event.type; 2491 *fops = &ftrace_event_id_fops; 2492 return 1; 2493 } 2494 #endif 2495 2496 #ifdef CONFIG_HIST_TRIGGERS 2497 if (strcmp(name, "hist") == 0) { 2498 *mode = TRACE_MODE_READ; 2499 *fops = &event_hist_fops; 2500 return 1; 2501 } 2502 #endif 2503 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 2504 if (strcmp(name, "hist_debug") == 0) { 2505 *mode = TRACE_MODE_READ; 2506 *fops = &event_hist_debug_fops; 2507 return 1; 2508 } 2509 #endif 2510 #ifdef CONFIG_TRACE_EVENT_INJECT 2511 if (call->event.type && call->class->reg && 2512 strcmp(name, "inject") == 0) { 2513 *mode = 0200; 2514 *fops = &event_inject_fops; 2515 return 1; 2516 } 2517 #endif 2518 return 0; 2519 } 2520 2521 static int 2522 event_create_dir(struct eventfs_inode *parent, struct trace_event_file *file) 2523 { 2524 struct trace_event_call *call = file->event_call; 2525 struct trace_array *tr = file->tr; 2526 struct eventfs_inode *e_events; 2527 struct eventfs_inode *ei; 2528 const char *name; 2529 int nr_entries; 2530 int ret; 2531 static struct eventfs_entry event_entries[] = { 2532 { 2533 .name = "enable", 2534 .callback = event_callback, 2535 }, 2536 { 2537 .name = "filter", 2538 .callback = event_callback, 2539 }, 2540 { 2541 .name = "trigger", 2542 .callback = event_callback, 2543 }, 2544 { 2545 .name = "format", 2546 .callback = event_callback, 2547 }, 2548 #ifdef CONFIG_PERF_EVENTS 2549 { 2550 .name = "id", 2551 .callback = event_callback, 2552 }, 2553 #endif 2554 #ifdef CONFIG_HIST_TRIGGERS 2555 { 2556 
.name = "hist", 2557 .callback = event_callback, 2558 }, 2559 #endif 2560 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 2561 { 2562 .name = "hist_debug", 2563 .callback = event_callback, 2564 }, 2565 #endif 2566 #ifdef CONFIG_TRACE_EVENT_INJECT 2567 { 2568 .name = "inject", 2569 .callback = event_callback, 2570 }, 2571 #endif 2572 }; 2573 2574 /* 2575 * If the trace point header did not define TRACE_SYSTEM 2576 * then the system would be called "TRACE_SYSTEM". This should 2577 * never happen. 2578 */ 2579 if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0)) 2580 return -ENODEV; 2581 2582 e_events = event_subsystem_dir(tr, call->class->system, file, parent); 2583 if (!e_events) 2584 return -ENOMEM; 2585 2586 nr_entries = ARRAY_SIZE(event_entries); 2587 2588 name = trace_event_name(call); 2589 ei = eventfs_create_dir(name, e_events, event_entries, nr_entries, file); 2590 if (IS_ERR(ei)) { 2591 pr_warn("Could not create tracefs '%s' directory\n", name); 2592 return -1; 2593 } 2594 2595 file->ei = ei; 2596 2597 ret = event_define_fields(call); 2598 if (ret < 0) { 2599 pr_warn("Could not initialize trace point events/%s\n", name); 2600 return ret; 2601 } 2602 2603 return 0; 2604 } 2605 2606 static void remove_event_from_tracers(struct trace_event_call *call) 2607 { 2608 struct trace_event_file *file; 2609 struct trace_array *tr; 2610 2611 do_for_each_event_file_safe(tr, file) { 2612 if (file->event_call != call) 2613 continue; 2614 2615 remove_event_file_dir(file); 2616 /* 2617 * The do_for_each_event_file_safe() is 2618 * a double loop. After finding the call for this 2619 * trace_array, we use break to jump to the next 2620 * trace_array. 2621 */ 2622 break; 2623 } while_for_each_event_file(); 2624 } 2625 2626 static void event_remove(struct trace_event_call *call) 2627 { 2628 struct trace_array *tr; 2629 struct trace_event_file *file; 2630 2631 do_for_each_event_file(tr, file) { 2632 if (file->event_call != call) 2633 continue; 2634 2635 if (file->flags & EVENT_FILE_FL_WAS_ENABLED) 2636 tr->clear_trace = true; 2637 2638 ftrace_event_enable_disable(file, 0); 2639 /* 2640 * The do_for_each_event_file() is 2641 * a double loop. After finding the call for this 2642 * trace_array, we use break to jump to the next 2643 * trace_array. 
2644 */
2645 break;
2646 } while_for_each_event_file();
2647
2648 if (call->event.funcs)
2649 __unregister_trace_event(&call->event);
2650 remove_event_from_tracers(call);
2651 list_del(&call->list);
2652 }
2653
2654 static int event_init(struct trace_event_call *call)
2655 {
2656 int ret = 0;
2657 const char *name;
2658
2659 name = trace_event_name(call);
2660 if (WARN_ON(!name))
2661 return -EINVAL;
2662
2663 if (call->class->raw_init) {
2664 ret = call->class->raw_init(call);
2665 if (ret < 0 && ret != -ENOSYS)
2666 pr_warn("Could not initialize trace events/%s\n", name);
2667 }
2668
2669 return ret;
2670 }
2671
2672 static int
2673 __register_event(struct trace_event_call *call, struct module *mod)
2674 {
2675 int ret;
2676
2677 ret = event_init(call);
2678 if (ret < 0)
2679 return ret;
2680
2681 list_add(&call->list, &ftrace_events);
2682 if (call->flags & TRACE_EVENT_FL_DYNAMIC)
2683 atomic_set(&call->refcnt, 0);
2684 else
2685 call->module = mod;
2686
2687 return 0;
2688 }
2689
2690 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
2691 {
2692 int rlen;
2693 int elen;
2694
2695 /* Find the length of the eval value as a string */
2696 elen = snprintf(ptr, 0, "%ld", map->eval_value);
2697 /* Make sure there's enough room to replace the string with the value */
2698 if (len < elen)
2699 return NULL;
2700
2701 snprintf(ptr, elen + 1, "%ld", map->eval_value);
2702
2703 /* Get the rest of the string of ptr */
2704 rlen = strlen(ptr + len);
2705 memmove(ptr + elen, ptr + len, rlen);
2706 /* Make sure we end the new string */
2707 ptr[elen + rlen] = 0;
2708
2709 return ptr + elen;
2710 }
2711
2712 static void update_event_printk(struct trace_event_call *call,
2713 struct trace_eval_map *map)
2714 {
2715 char *ptr;
2716 int quote = 0;
2717 int len = strlen(map->eval_string);
2718
2719 for (ptr = call->print_fmt; *ptr; ptr++) {
2720 if (*ptr == '\\') {
2721 ptr++;
2722 /* paranoid */
2723 if (!*ptr)
2724 break;
2725 continue;
2726 }
2727 if (*ptr == '"') {
2728 quote ^= 1;
2729 continue;
2730 }
2731 if (quote)
2732 continue;
2733 if (isdigit(*ptr)) {
2734 /* skip numbers */
2735 do {
2736 ptr++;
2737 /* Check for alpha chars like ULL */
2738 } while (isalnum(*ptr));
2739 if (!*ptr)
2740 break;
2741 /*
2742 * A number must have some kind of delimiter after
2743 * it, and we can ignore that too.
2744 */
2745 continue;
2746 }
2747 if (isalpha(*ptr) || *ptr == '_') {
2748 if (strncmp(map->eval_string, ptr, len) == 0 &&
2749 !isalnum(ptr[len]) && ptr[len] != '_') {
2750 ptr = eval_replace(ptr, map, len);
2751 /* enum/sizeof string smaller than value */
2752 if (WARN_ON_ONCE(!ptr))
2753 return;
2754 /*
2755 * No need to decrement here, as eval_replace()
2756 * returns the pointer to the character past
2757 * the eval, and two evals cannot be placed
2758 * back to back without something in between.
2759 * We can skip that something in between.
2760 */
2761 continue;
2762 }
2763 skip_more:
2764 do {
2765 ptr++;
2766 } while (isalnum(*ptr) || *ptr == '_');
2767 if (!*ptr)
2768 break;
2769 /*
2770 * If what comes after this variable is a '.' or
2771 * '->' then we can continue to ignore that string.
2772 */
2773 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2774 ptr += *ptr == '.' ? 1 : 2;
2775 if (!*ptr)
2776 break;
2777 goto skip_more;
2778 }
2779 /*
2780 * Once again, we can skip the delimiter that came
2781 * after the string.
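 * As a concrete (illustrative) example of the rewrite this loop
 * performs: with an eval map entry of { "HI_SOFTIRQ" = 0 }, a print_fmt
 * fragment such as __print_symbolic(REC->vec, { HI_SOFTIRQ, "HI" }) is
 * rewritten in place to __print_symbolic(REC->vec, { 0, "HI" }).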
2782 */
2783 continue;
2784 }
2785 }
2786 }
2787
2788 static void add_str_to_module(struct module *module, char *str)
2789 {
2790 struct module_string *modstr;
2791
2792 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL);
2793
2794 /*
2795 * If we fail to allocate memory here, then we'll just
2796 * let the str memory leak when the module is removed;
2797 * there are worse problems than
2798 * a leaked string on module removal.
2799 */
2800 if (WARN_ON_ONCE(!modstr))
2801 return;
2802
2803 modstr->module = module;
2804 modstr->str = str;
2805
2806 list_add(&modstr->next, &module_strings);
2807 }
2808
2809 static void update_event_fields(struct trace_event_call *call,
2810 struct trace_eval_map *map)
2811 {
2812 struct ftrace_event_field *field;
2813 struct list_head *head;
2814 char *ptr;
2815 char *str;
2816 int len = strlen(map->eval_string);
2817
2818 /* Dynamic events should never have field maps */
2819 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC))
2820 return;
2821
2822 head = trace_get_fields(call);
2823 list_for_each_entry(field, head, link) {
2824 ptr = strchr(field->type, '[');
2825 if (!ptr)
2826 continue;
2827 ptr++;
2828
2829 if (!isalpha(*ptr) && *ptr != '_')
2830 continue;
2831
2832 if (strncmp(map->eval_string, ptr, len) != 0)
2833 continue;
2834
2835 str = kstrdup(field->type, GFP_KERNEL);
2836 if (WARN_ON_ONCE(!str))
2837 return;
2838 ptr = str + (ptr - field->type);
2839 ptr = eval_replace(ptr, map, len);
2840 /* enum/sizeof string smaller than value */
2841 if (WARN_ON_ONCE(!ptr)) {
2842 kfree(str);
2843 continue;
2844 }
2845
2846 /*
2847 * If the event is part of a module, then we need to free the string
2848 * when the module is removed. Otherwise, it will stay allocated
2849 * until a reboot.
2850 */
2851 if (call->module)
2852 add_str_to_module(call->module, str);
2853
2854 field->type = str;
2855 }
2856 }
2857
2858 void trace_event_eval_update(struct trace_eval_map **map, int len)
2859 {
2860 struct trace_event_call *call, *p;
2861 const char *last_system = NULL;
2862 bool first = false;
2863 int last_i;
2864 int i;
2865
2866 down_write(&trace_event_sem);
2867 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2868 /* events are usually grouped together with systems */
2869 if (!last_system || call->class->system != last_system) {
2870 first = true;
2871 last_i = 0;
2872 last_system = call->class->system;
2873 }
2874
2875 /*
2876 * Since calls are grouped by systems, the likelihood that the
2877 * next call in the iteration belongs to the same system as the
2878 * previous call is high. As an optimization, we skip searching
2879 * for a map[] that matches the call's system if the last call
2880 * was from the same system. That's what last_i is for. If the
2881 * call has the same system as the previous call, then last_i
2882 * will be the index of the first map[] that has a matching
2883 * system.
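 * For example (an illustrative map layout, not real data): if map[]
 * holds entries for systems { "irq", "sched", "sched" }, the first
 * "sched" call scans from index 0 and records last_i = 1; every
 * subsequent "sched" call then starts its scan at index 1, skipping
 * the "irq" entry entirely.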
2884 */ 2885 for (i = last_i; i < len; i++) { 2886 if (call->class->system == map[i]->system) { 2887 /* Save the first system if need be */ 2888 if (first) { 2889 last_i = i; 2890 first = false; 2891 } 2892 update_event_printk(call, map[i]); 2893 update_event_fields(call, map[i]); 2894 } 2895 } 2896 cond_resched(); 2897 } 2898 up_write(&trace_event_sem); 2899 } 2900 2901 static struct trace_event_file * 2902 trace_create_new_event(struct trace_event_call *call, 2903 struct trace_array *tr) 2904 { 2905 struct trace_pid_list *no_pid_list; 2906 struct trace_pid_list *pid_list; 2907 struct trace_event_file *file; 2908 unsigned int first; 2909 2910 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 2911 if (!file) 2912 return NULL; 2913 2914 pid_list = rcu_dereference_protected(tr->filtered_pids, 2915 lockdep_is_held(&event_mutex)); 2916 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 2917 lockdep_is_held(&event_mutex)); 2918 2919 if (!trace_pid_list_first(pid_list, &first) || 2920 !trace_pid_list_first(no_pid_list, &first)) 2921 file->flags |= EVENT_FILE_FL_PID_FILTER; 2922 2923 file->event_call = call; 2924 file->tr = tr; 2925 atomic_set(&file->sm_ref, 0); 2926 atomic_set(&file->tm_ref, 0); 2927 INIT_LIST_HEAD(&file->triggers); 2928 list_add(&file->list, &tr->events); 2929 event_file_get(file); 2930 2931 return file; 2932 } 2933 2934 #define MAX_BOOT_TRIGGERS 32 2935 2936 static struct boot_triggers { 2937 const char *event; 2938 char *trigger; 2939 } bootup_triggers[MAX_BOOT_TRIGGERS]; 2940 2941 static char bootup_trigger_buf[COMMAND_LINE_SIZE]; 2942 static int nr_boot_triggers; 2943 2944 static __init int setup_trace_triggers(char *str) 2945 { 2946 char *trigger; 2947 char *buf; 2948 int i; 2949 2950 strscpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE); 2951 ring_buffer_expanded = true; 2952 disable_tracing_selftest("running event triggers"); 2953 2954 buf = bootup_trigger_buf; 2955 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) { 2956 trigger = strsep(&buf, ","); 2957 if (!trigger) 2958 break; 2959 bootup_triggers[i].event = strsep(&trigger, "."); 2960 bootup_triggers[i].trigger = trigger; 2961 if (!bootup_triggers[i].trigger) 2962 break; 2963 } 2964 2965 nr_boot_triggers = i; 2966 return 1; 2967 } 2968 __setup("trace_trigger=", setup_trace_triggers); 2969 2970 /* Add an event to a trace directory */ 2971 static int 2972 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) 2973 { 2974 struct trace_event_file *file; 2975 2976 file = trace_create_new_event(call, tr); 2977 if (!file) 2978 return -ENOMEM; 2979 2980 if (eventdir_initialized) 2981 return event_create_dir(tr->event_dir, file); 2982 else 2983 return event_define_fields(call); 2984 } 2985 2986 static void trace_early_triggers(struct trace_event_file *file, const char *name) 2987 { 2988 int ret; 2989 int i; 2990 2991 for (i = 0; i < nr_boot_triggers; i++) { 2992 if (strcmp(name, bootup_triggers[i].event)) 2993 continue; 2994 mutex_lock(&event_mutex); 2995 ret = trigger_process_regex(file, bootup_triggers[i].trigger); 2996 mutex_unlock(&event_mutex); 2997 if (ret) 2998 pr_err("Failed to register trigger '%s' on event %s\n", 2999 bootup_triggers[i].trigger, 3000 bootup_triggers[i].event); 3001 } 3002 } 3003 3004 /* 3005 * Just create a descriptor for early init. A descriptor is required 3006 * for enabling events at boot. We want to enable events before 3007 * the filesystem is initialized. 
3008 */
3009 static int
3010 __trace_early_add_new_event(struct trace_event_call *call,
3011 struct trace_array *tr)
3012 {
3013 struct trace_event_file *file;
3014 int ret;
3015
3016 file = trace_create_new_event(call, tr);
3017 if (!file)
3018 return -ENOMEM;
3019
3020 ret = event_define_fields(call);
3021 if (ret)
3022 return ret;
3023
3024 trace_early_triggers(file, trace_event_name(call));
3025
3026 return 0;
3027 }
3028
3029 struct ftrace_module_file_ops;
3030 static void __add_event_to_tracers(struct trace_event_call *call);
3031
3032 /* Add an additional event_call dynamically */
3033 int trace_add_event_call(struct trace_event_call *call)
3034 {
3035 int ret;
3036 lockdep_assert_held(&event_mutex);
3037
3038 mutex_lock(&trace_types_lock);
3039
3040 ret = __register_event(call, NULL);
3041 if (ret >= 0)
3042 __add_event_to_tracers(call);
3043
3044 mutex_unlock(&trace_types_lock);
3045 return ret;
3046 }
3047 EXPORT_SYMBOL_GPL(trace_add_event_call);
3048
3049 /*
3050 * Must be called under locking of trace_types_lock, event_mutex and
3051 * trace_event_sem.
3052 */
3053 static void __trace_remove_event_call(struct trace_event_call *call)
3054 {
3055 event_remove(call);
3056 trace_destroy_fields(call);
3057 free_event_filter(call->filter);
3058 call->filter = NULL;
3059 }
3060
3061 static int probe_remove_event_call(struct trace_event_call *call)
3062 {
3063 struct trace_array *tr;
3064 struct trace_event_file *file;
3065
3066 #ifdef CONFIG_PERF_EVENTS
3067 if (call->perf_refcount)
3068 return -EBUSY;
3069 #endif
3070 do_for_each_event_file(tr, file) {
3071 if (file->event_call != call)
3072 continue;
3073 /*
3074 * We can't rely on the ftrace_event_enable_disable(enable => 0)
3075 * call we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
3076 * TRACE_REG_UNREGISTER.
3077 */
3078 if (file->flags & EVENT_FILE_FL_ENABLED)
3079 goto busy;
3080
3081 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
3082 tr->clear_trace = true;
3083 /*
3084 * The do_for_each_event_file() is
3085 * a double loop. After finding the call for this
3086 * trace_array, we use break to jump to the next
3087 * trace_array.
3088 */
3089 break;
3090 } while_for_each_event_file();
3091
3092 __trace_remove_event_call(call);
3093
3094 return 0;
3095 busy:
3096 /* No need to clear the trace now */
3097 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
3098 tr->clear_trace = false;
3099 }
3100 return -EBUSY;
3101 }
3102
3103 /* Remove an event_call */
3104 int trace_remove_event_call(struct trace_event_call *call)
3105 {
3106 int ret;
3107
3108 lockdep_assert_held(&event_mutex);
3109
3110 mutex_lock(&trace_types_lock);
3111 down_write(&trace_event_sem);
3112 ret = probe_remove_event_call(call);
3113 up_write(&trace_event_sem);
3114 mutex_unlock(&trace_types_lock);
3115
3116 return ret;
3117 }
3118 EXPORT_SYMBOL_GPL(trace_remove_event_call);
3119
3120 #define for_each_event(event, start, end) \
3121 for (event = start; \
3122 (unsigned long)event < (unsigned long)end; \
3123 event++)
3124
3125 #ifdef CONFIG_MODULES
3126
3127 static void trace_module_add_events(struct module *mod)
3128 {
3129 struct trace_event_call **call, **start, **end;
3130
3131 if (!mod->num_trace_events)
3132 return;
3133
3134 /* Don't add infrastructure for mods without tracepoints */
3135 if (trace_module_has_bad_taint(mod)) {
3136 pr_err("%s: module has bad taint, not creating trace events\n",
3137 mod->name);
3138 return;
3139 }
3140
3141 start = mod->trace_events;
3142 end = mod->trace_events + mod->num_trace_events;
3143
3144 for_each_event(call, start, end) {
3145 __register_event(*call, mod);
3146 __add_event_to_tracers(*call);
3147 }
3148 }
3149
3150 static void trace_module_remove_events(struct module *mod)
3151 {
3152 struct trace_event_call *call, *p;
3153 struct module_string *modstr, *m;
3154
3155 down_write(&trace_event_sem);
3156 list_for_each_entry_safe(call, p, &ftrace_events, list) {
3157 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
3158 continue;
3159 if (call->module == mod)
3160 __trace_remove_event_call(call);
3161 }
3162 /* Check for any strings allocated for this module */
3163 list_for_each_entry_safe(modstr, m, &module_strings, next) {
3164 if (modstr->module != mod)
3165 continue;
3166 list_del(&modstr->next);
3167 kfree(modstr->str);
3168 kfree(modstr);
3169 }
3170 up_write(&trace_event_sem);
3171
3172 /*
3173 * It is safest to reset the ring buffer if the module being unloaded
3174 * registered any events that were used. The only worry is if
3175 * a new module gets loaded, and takes on the same id as the events
3176 * of this module. When printing out the buffer, traced events left
3177 * over from this module may be passed to the new module events and
3178 * unexpected results may occur.
3179 */
3180 tracing_reset_all_online_cpus_unlocked();
3181 }
3182
3183 static int trace_module_notify(struct notifier_block *self,
3184 unsigned long val, void *data)
3185 {
3186 struct module *mod = data;
3187
3188 mutex_lock(&event_mutex);
3189 mutex_lock(&trace_types_lock);
3190 switch (val) {
3191 case MODULE_STATE_COMING:
3192 trace_module_add_events(mod);
3193 break;
3194 case MODULE_STATE_GOING:
3195 trace_module_remove_events(mod);
3196 break;
3197 }
3198 mutex_unlock(&trace_types_lock);
3199 mutex_unlock(&event_mutex);
3200
3201 return NOTIFY_OK;
3202 }
3203
3204 static struct notifier_block trace_module_nb = {
3205 .notifier_call = trace_module_notify,
3206 .priority = 1, /* higher than trace.c module notify */
3207 };
3208 #endif /* CONFIG_MODULES */
3209
3210 /* Create a new event directory structure for a trace directory.
*/ 3211 static void 3212 __trace_add_event_dirs(struct trace_array *tr) 3213 { 3214 struct trace_event_call *call; 3215 int ret; 3216 3217 list_for_each_entry(call, &ftrace_events, list) { 3218 ret = __trace_add_new_event(call, tr); 3219 if (ret < 0) 3220 pr_warn("Could not create directory for event %s\n", 3221 trace_event_name(call)); 3222 } 3223 } 3224 3225 /* Returns any file that matches the system and event */ 3226 struct trace_event_file * 3227 __find_event_file(struct trace_array *tr, const char *system, const char *event) 3228 { 3229 struct trace_event_file *file; 3230 struct trace_event_call *call; 3231 const char *name; 3232 3233 list_for_each_entry(file, &tr->events, list) { 3234 3235 call = file->event_call; 3236 name = trace_event_name(call); 3237 3238 if (!name || !call->class) 3239 continue; 3240 3241 if (strcmp(event, name) == 0 && 3242 strcmp(system, call->class->system) == 0) 3243 return file; 3244 } 3245 return NULL; 3246 } 3247 3248 /* Returns valid trace event files that match system and event */ 3249 struct trace_event_file * 3250 find_event_file(struct trace_array *tr, const char *system, const char *event) 3251 { 3252 struct trace_event_file *file; 3253 3254 file = __find_event_file(tr, system, event); 3255 if (!file || !file->event_call->class->reg || 3256 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 3257 return NULL; 3258 3259 return file; 3260 } 3261 3262 /** 3263 * trace_get_event_file - Find and return a trace event file 3264 * @instance: The name of the trace instance containing the event 3265 * @system: The name of the system containing the event 3266 * @event: The name of the event 3267 * 3268 * Return a trace event file given the trace instance name, trace 3269 * system, and trace event name. If the instance name is NULL, it 3270 * refers to the top-level trace array. 3271 * 3272 * This function will look it up and return it if found, after calling 3273 * trace_array_get() to prevent the instance from going away, and 3274 * increment the event's module refcount to prevent it from being 3275 * removed. 3276 * 3277 * To release the file, call trace_put_event_file(), which will call 3278 * trace_array_put() and decrement the event's module refcount. 3279 * 3280 * Return: The trace event on success, ERR_PTR otherwise. 
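 *
 * A sketch of typical usage from module code (the event names here are
 * illustrative only):
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use the event file ...
 *	trace_put_event_file(file);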
3281 */ 3282 struct trace_event_file *trace_get_event_file(const char *instance, 3283 const char *system, 3284 const char *event) 3285 { 3286 struct trace_array *tr = top_trace_array(); 3287 struct trace_event_file *file = NULL; 3288 int ret = -EINVAL; 3289 3290 if (instance) { 3291 tr = trace_array_find_get(instance); 3292 if (!tr) 3293 return ERR_PTR(-ENOENT); 3294 } else { 3295 ret = trace_array_get(tr); 3296 if (ret) 3297 return ERR_PTR(ret); 3298 } 3299 3300 mutex_lock(&event_mutex); 3301 3302 file = find_event_file(tr, system, event); 3303 if (!file) { 3304 trace_array_put(tr); 3305 ret = -EINVAL; 3306 goto out; 3307 } 3308 3309 /* Don't let event modules unload while in use */ 3310 ret = trace_event_try_get_ref(file->event_call); 3311 if (!ret) { 3312 trace_array_put(tr); 3313 ret = -EBUSY; 3314 goto out; 3315 } 3316 3317 ret = 0; 3318 out: 3319 mutex_unlock(&event_mutex); 3320 3321 if (ret) 3322 file = ERR_PTR(ret); 3323 3324 return file; 3325 } 3326 EXPORT_SYMBOL_GPL(trace_get_event_file); 3327 3328 /** 3329 * trace_put_event_file - Release a file from trace_get_event_file() 3330 * @file: The trace event file 3331 * 3332 * If a file was retrieved using trace_get_event_file(), this should 3333 * be called when it's no longer needed. It will cancel the previous 3334 * trace_array_get() called by that function, and decrement the 3335 * event's module refcount. 3336 */ 3337 void trace_put_event_file(struct trace_event_file *file) 3338 { 3339 mutex_lock(&event_mutex); 3340 trace_event_put_ref(file->event_call); 3341 mutex_unlock(&event_mutex); 3342 3343 trace_array_put(file->tr); 3344 } 3345 EXPORT_SYMBOL_GPL(trace_put_event_file); 3346 3347 #ifdef CONFIG_DYNAMIC_FTRACE 3348 3349 /* Avoid typos */ 3350 #define ENABLE_EVENT_STR "enable_event" 3351 #define DISABLE_EVENT_STR "disable_event" 3352 3353 struct event_probe_data { 3354 struct trace_event_file *file; 3355 unsigned long count; 3356 int ref; 3357 bool enable; 3358 }; 3359 3360 static void update_event_probe(struct event_probe_data *data) 3361 { 3362 if (data->enable) 3363 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3364 else 3365 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3366 } 3367 3368 static void 3369 event_enable_probe(unsigned long ip, unsigned long parent_ip, 3370 struct trace_array *tr, struct ftrace_probe_ops *ops, 3371 void *data) 3372 { 3373 struct ftrace_func_mapper *mapper = data; 3374 struct event_probe_data *edata; 3375 void **pdata; 3376 3377 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3378 if (!pdata || !*pdata) 3379 return; 3380 3381 edata = *pdata; 3382 update_event_probe(edata); 3383 } 3384 3385 static void 3386 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, 3387 struct trace_array *tr, struct ftrace_probe_ops *ops, 3388 void *data) 3389 { 3390 struct ftrace_func_mapper *mapper = data; 3391 struct event_probe_data *edata; 3392 void **pdata; 3393 3394 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3395 if (!pdata || !*pdata) 3396 return; 3397 3398 edata = *pdata; 3399 3400 if (!edata->count) 3401 return; 3402 3403 /* Skip if the event is in a state we want to switch to */ 3404 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 3405 return; 3406 3407 if (edata->count != -1) 3408 (edata->count)--; 3409 3410 update_event_probe(edata); 3411 } 3412 3413 static int 3414 event_enable_print(struct seq_file *m, unsigned long ip, 3415 struct ftrace_probe_ops *ops, void *data) 3416 { 3417 struct ftrace_func_mapper *mapper = data; 3418 
struct event_probe_data *edata;
3419 void **pdata;
3420
3421 pdata = ftrace_func_mapper_find_ip(mapper, ip);
3422
3423 if (WARN_ON_ONCE(!pdata || !*pdata))
3424 return 0;
3425
3426 edata = *pdata;
3427
3428 seq_printf(m, "%ps:", (void *)ip);
3429
3430 seq_printf(m, "%s:%s:%s",
3431 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
3432 edata->file->event_call->class->system,
3433 trace_event_name(edata->file->event_call));
3434
3435 if (edata->count == -1)
3436 seq_puts(m, ":unlimited\n");
3437 else
3438 seq_printf(m, ":count=%ld\n", edata->count);
3439
3440 return 0;
3441 }
3442
3443 static int
3444 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
3445 unsigned long ip, void *init_data, void **data)
3446 {
3447 struct ftrace_func_mapper *mapper = *data;
3448 struct event_probe_data *edata = init_data;
3449 int ret;
3450
3451 if (!mapper) {
3452 mapper = allocate_ftrace_func_mapper();
3453 if (!mapper)
3454 return -ENODEV;
3455 *data = mapper;
3456 }
3457
3458 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
3459 if (ret < 0)
3460 return ret;
3461
3462 edata->ref++;
3463
3464 return 0;
3465 }
3466
3467 static int free_probe_data(void *data)
3468 {
3469 struct event_probe_data *edata = data;
3470
3471 edata->ref--;
3472 if (!edata->ref) {
3473 /* Remove the SOFT_MODE flag */
3474 __ftrace_event_enable_disable(edata->file, 0, 1);
3475 trace_event_put_ref(edata->file->event_call);
3476 kfree(edata);
3477 }
3478 return 0;
3479 }
3480
3481 static void
3482 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
3483 unsigned long ip, void *data)
3484 {
3485 struct ftrace_func_mapper *mapper = data;
3486 struct event_probe_data *edata;
3487
3488 if (!ip) {
3489 if (!mapper)
3490 return;
3491 free_ftrace_func_mapper(mapper, free_probe_data);
3492 return;
3493 }
3494
3495 edata = ftrace_func_mapper_remove_ip(mapper, ip);
3496
3497 if (WARN_ON_ONCE(!edata))
3498 return;
3499
3500 if (WARN_ON_ONCE(edata->ref <= 0))
3501 return;
3502
3503 free_probe_data(edata);
3504 }
3505
3506 static struct ftrace_probe_ops event_enable_probe_ops = {
3507 .func = event_enable_probe,
3508 .print = event_enable_print,
3509 .init = event_enable_init,
3510 .free = event_enable_free,
3511 };
3512
3513 static struct ftrace_probe_ops event_enable_count_probe_ops = {
3514 .func = event_enable_count_probe,
3515 .print = event_enable_print,
3516 .init = event_enable_init,
3517 .free = event_enable_free,
3518 };
3519
3520 static struct ftrace_probe_ops event_disable_probe_ops = {
3521 .func = event_enable_probe,
3522 .print = event_enable_print,
3523 .init = event_enable_init,
3524 .free = event_enable_free,
3525 };
3526
3527 static struct ftrace_probe_ops event_disable_count_probe_ops = {
3528 .func = event_enable_count_probe,
3529 .print = event_enable_print,
3530 .init = event_enable_init,
3531 .free = event_enable_free,
3532 };
3533
3534 static int
3535 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3536 char *glob, char *cmd, char *param, int enabled)
3537 {
3538 struct trace_event_file *file;
3539 struct ftrace_probe_ops *ops;
3540 struct event_probe_data *data;
3541 const char *system;
3542 const char *event;
3543 char *number;
3544 bool enable;
3545 int ret;
3546
3547 if (!tr)
3548 return -ENODEV;
3549
3550 /* hash funcs only work with set_ftrace_filter */
3551 if (!enabled || !param)
3552 return -EINVAL;
3553
3554 system = strsep(&param, ":");
3555 if (!param)
3556 return -EINVAL;
3557
3558 event = strsep(&param, ":");
3559
3560 mutex_lock(&event_mutex);
3561
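	/*
	 * An illustrative command (following the ftrace documentation):
	 *
	 *   echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
	 *
	 * arrives here with glob = "schedule", cmd = "enable_event",
	 * system = "sched", event = "sched_switch" and param = "2".
	 */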
3562 ret = -EINVAL;
3563 file = find_event_file(tr, system, event);
3564 if (!file)
3565 goto out;
3566
3567 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3568
3569 if (enable)
3570 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3571 else
3572 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3573
3574 if (glob[0] == '!') {
3575 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3576 goto out;
3577 }
3578
3579 ret = -ENOMEM;
3580
3581 data = kzalloc(sizeof(*data), GFP_KERNEL);
3582 if (!data)
3583 goto out;
3584
3585 data->enable = enable;
3586 data->count = -1;
3587 data->file = file;
3588
3589 if (!param)
3590 goto out_reg;
3591
3592 number = strsep(&param, ":");
3593
3594 ret = -EINVAL;
3595 if (!strlen(number))
3596 goto out_free;
3597
3598 /*
3599 * We use the callback data field (which is a pointer)
3600 * as our counter.
3601 */
3602 ret = kstrtoul(number, 0, &data->count);
3603 if (ret)
3604 goto out_free;
3605
3606 out_reg:
3607 /* Don't let event modules unload while probe registered */
3608 ret = trace_event_try_get_ref(file->event_call);
3609 if (!ret) {
3610 ret = -EBUSY;
3611 goto out_free;
3612 }
3613
3614 ret = __ftrace_event_enable_disable(file, 1, 1);
3615 if (ret < 0)
3616 goto out_put;
3617
3618 ret = register_ftrace_function_probe(glob, tr, ops, data);
3619 /*
3620 * On success the above returns the number of functions enabled,
3621 * but if it didn't find any functions it returns zero.
3622 * Consider finding no functions a failure too.
3623 */
3624 if (!ret) {
3625 ret = -ENOENT;
3626 goto out_disable;
3627 } else if (ret < 0)
3628 goto out_disable;
3629 /* Just return zero, not the number of enabled functions */
3630 ret = 0;
3631 out:
3632 mutex_unlock(&event_mutex);
3633 return ret;
3634
3635 out_disable:
3636 __ftrace_event_enable_disable(file, 0, 1);
3637 out_put:
3638 trace_event_put_ref(file->event_call);
3639 out_free:
3640 kfree(data);
3641 goto out;
3642 }
3643
3644 static struct ftrace_func_command event_enable_cmd = {
3645 .name = ENABLE_EVENT_STR,
3646 .func = event_enable_func,
3647 };
3648
3649 static struct ftrace_func_command event_disable_cmd = {
3650 .name = DISABLE_EVENT_STR,
3651 .func = event_enable_func,
3652 };
3653
3654 static __init int register_event_cmds(void)
3655 {
3656 int ret;
3657
3658 ret = register_ftrace_command(&event_enable_cmd);
3659 if (WARN_ON(ret < 0))
3660 return ret;
3661 ret = register_ftrace_command(&event_disable_cmd);
3662 if (WARN_ON(ret < 0))
3663 unregister_ftrace_command(&event_enable_cmd);
3664 return ret;
3665 }
3666 #else
3667 static inline int register_event_cmds(void) { return 0; }
3668 #endif /* CONFIG_DYNAMIC_FTRACE */
3669
3670 /*
3671 * The top level array and trace arrays created by boot-time tracing
3672 * have already had their trace_event_file descriptors created in order
3673 * to allow for early events to be recorded.
3674 * This function is called after the tracefs has been initialized,
3675 * and we now have to create the files associated with the events.
3676 */
3677 static void __trace_early_add_event_dirs(struct trace_array *tr)
3678 {
3679 struct trace_event_file *file;
3680 int ret;
3681
3682
3683 list_for_each_entry(file, &tr->events, list) {
3684 ret = event_create_dir(tr->event_dir, file);
3685 if (ret < 0)
3686 pr_warn("Could not create directory for event %s\n",
3687 trace_event_name(file->event_call));
3688 }
3689 }
3690
3691 /*
3692 * For early boot up, the top trace array and the trace arrays created
3693 * by boot-time tracing need to have a list of events that can be
3694 * enabled. This must be done before the filesystem is set up in order
3695 * to allow events to be traced early.
3696 */
3697 void __trace_early_add_events(struct trace_array *tr)
3698 {
3699 struct trace_event_call *call;
3700 int ret;
3701
3702 list_for_each_entry(call, &ftrace_events, list) {
3703 /* Early boot up should not have any modules loaded */
3704 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
3705 WARN_ON_ONCE(call->module))
3706 continue;
3707
3708 ret = __trace_early_add_new_event(call, tr);
3709 if (ret < 0)
3710 pr_warn("Could not create early event %s\n",
3711 trace_event_name(call));
3712 }
3713 }
3714
3715 /* Remove the event directory structure for a trace directory. */
3716 static void
3717 __trace_remove_event_dirs(struct trace_array *tr)
3718 {
3719 struct trace_event_file *file, *next;
3720
3721 list_for_each_entry_safe(file, next, &tr->events, list)
3722 remove_event_file_dir(file);
3723 }
3724
3725 static void __add_event_to_tracers(struct trace_event_call *call)
3726 {
3727 struct trace_array *tr;
3728
3729 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3730 __trace_add_new_event(call, tr);
3731 }
3732
3733 extern struct trace_event_call *__start_ftrace_events[];
3734 extern struct trace_event_call *__stop_ftrace_events[];
3735
3736 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3737
3738 static __init int setup_trace_event(char *str)
3739 {
3740 strscpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
3741 ring_buffer_expanded = true;
3742 disable_tracing_selftest("running event tracing");
3743
3744 return 1;
3745 }
3746 __setup("trace_event=", setup_trace_event);
3747
3748 static int events_callback(const char *name, umode_t *mode, void **data,
3749 const struct file_operations **fops)
3750 {
3751 if (strcmp(name, "enable") == 0) {
3752 *mode = TRACE_MODE_WRITE;
3753 *fops = &ftrace_tr_enable_fops;
3754 return 1;
3755 }
3756
3757 if (strcmp(name, "header_page") == 0)
3758 *data = ring_buffer_print_page_header;
3759
3760 else if (strcmp(name, "header_event") == 0)
3761 *data = ring_buffer_print_entry_header;
3762
3763 else
3764 return 0;
3765
3766 *mode = TRACE_MODE_READ;
3767 *fops = &ftrace_show_header_fops;
3768 return 1;
3769 }
3770
3771 /* Expects to have event_mutex held when called */
3772 static int
3773 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
3774 {
3775 struct eventfs_inode *e_events;
3776 struct dentry *entry;
3777 int nr_entries;
3778 static struct eventfs_entry events_entries[] = {
3779 {
3780 .name = "enable",
3781 .callback = events_callback,
3782 },
3783 {
3784 .name = "header_page",
3785 .callback = events_callback,
3786 },
3787 {
3788 .name = "header_event",
3789 .callback = events_callback,
3790 },
3791 };
3792
3793 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
3794 tr, &ftrace_set_event_fops);
3795 if (!entry)
3796 return -ENOMEM;
3797
3798 nr_entries = ARRAY_SIZE(events_entries);
3799
3800 e_events = eventfs_create_events_dir("events",
parent, events_entries,
3801 nr_entries, tr);
3802 if (IS_ERR(e_events)) {
3803 pr_warn("Could not create tracefs 'events' directory\n");
3804 return -ENOMEM;
3805 }
3806
3807 /* These are not as crucial, just warn if they are not created */
3808
3809 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent,
3810 tr, &ftrace_set_event_pid_fops);
3811
3812 trace_create_file("set_event_notrace_pid",
3813 TRACE_MODE_WRITE, parent, tr,
3814 &ftrace_set_event_notrace_pid_fops);
3815
3816 tr->event_dir = e_events;
3817
3818 return 0;
3819 }
3820
3821 /**
3822 * event_trace_add_tracer - add an instance of a trace_array to events
3823 * @parent: The parent dentry to place the files/directories for events in
3824 * @tr: The trace array associated with these events
3825 *
3826 * When a new instance is created, it needs to set up its events
3827 * directory, as well as other files associated with events. It also
3828 * creates the event hierarchy in the @parent/events directory.
3829 *
3830 * Returns 0 on success.
3831 *
3832 * Must be called with event_mutex held.
3833 */
3834 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3835 {
3836 int ret;
3837
3838 lockdep_assert_held(&event_mutex);
3839
3840 ret = create_event_toplevel_files(parent, tr);
3841 if (ret)
3842 goto out;
3843
3844 down_write(&trace_event_sem);
3845 /* If tr already has the event list, it is initialized in early boot. */
3846 if (unlikely(!list_empty(&tr->events)))
3847 __trace_early_add_event_dirs(tr);
3848 else
3849 __trace_add_event_dirs(tr);
3850 up_write(&trace_event_sem);
3851
3852 out:
3853 return ret;
3854 }
3855
3856 /*
3857 * The top trace array already had its file descriptors created.
3858 * Now the files themselves need to be created.
3859 */
3860 static __init int
3861 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3862 {
3863 int ret;
3864
3865 mutex_lock(&event_mutex);
3866
3867 ret = create_event_toplevel_files(parent, tr);
3868 if (ret)
3869 goto out_unlock;
3870
3871 down_write(&trace_event_sem);
3872 __trace_early_add_event_dirs(tr);
3873 up_write(&trace_event_sem);
3874
3875 out_unlock:
3876 mutex_unlock(&event_mutex);
3877
3878 return ret;
3879 }
3880
3881 /* Must be called with event_mutex held */
3882 int event_trace_del_tracer(struct trace_array *tr)
3883 {
3884 lockdep_assert_held(&event_mutex);
3885
3886 /* Disable any event triggers and associated soft-disabled events */
3887 clear_event_triggers(tr);
3888
3889 /* Clear the pid list */
3890 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
3891
3892 /* Disable any running events */
3893 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3894
3895 /* Make sure no more events are being executed */
3896 tracepoint_synchronize_unregister();
3897
3898 down_write(&trace_event_sem);
3899 __trace_remove_event_dirs(tr);
3900 eventfs_remove_events_dir(tr->event_dir);
3901 up_write(&trace_event_sem);
3902
3903 tr->event_dir = NULL;
3904
3905 return 0;
3906 }
3907
3908 static __init int event_trace_memsetup(void)
3909 {
3910 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3911 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3912 return 0;
3913 }
3914
3915 __init void
3916 early_enable_events(struct trace_array *tr, char *buf, bool disable_first)
3917 {
3918 char *token;
3919 int ret;
3920
3921 while (true) {
3922 token = strsep(&buf, ",");
3923
3924 if (!token)
3925 break;
3926
3927 if (*token) {
3928 /* Restarting syscalls requires that we stop them first */
3929 if (disable_first)
3930 ftrace_set_clr_event(tr, token, 0);
3931
3932 ret = ftrace_set_clr_event(tr, token, 1);
3933 if (ret)
3934 pr_warn("Failed to enable trace event: %s\n", token);
3935 }
3936
3937 /* Put back the comma to allow this to be called again */
3938 if (buf)
3939 *(buf - 1) = ',';
3940 }
3941 }
3942
3943 static __init int event_trace_enable(void)
3944 {
3945 struct trace_array *tr = top_trace_array();
3946 struct trace_event_call **iter, *call;
3947 int ret;
3948
3949 if (!tr)
3950 return -ENODEV;
3951
3952 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3953
3954 call = *iter;
3955 ret = event_init(call);
3956 if (!ret)
3957 list_add(&call->list, &ftrace_events);
3958 }
3959
3960 register_trigger_cmds();
3961
3962 /*
3963 * We need the top trace array to have a working set of trace
3964 * points at early init, before the debug files and directories
3965 * are created. Create the file entries now, and attach them
3966 * to the actual file dentries later.
3967 */
3968 __trace_early_add_events(tr);
3969
3970 early_enable_events(tr, bootup_event_buf, false);
3971
3972 trace_printk_start_comm();
3973
3974 register_event_cmds();
3975
3976
3977 return 0;
3978 }
3979
3980 /*
3981 * event_trace_enable() is called from trace_event_init() first to
3982 * initialize events and perhaps start any events that are on the
3983 * command line. Unfortunately, there are some events that will not
3984 * start this early, like the system call tracepoints that need
3985 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. Since
3986 * event_trace_enable() is called before pid 1 starts, that flag is
3987 * never set, so the syscall tracepoints are never reached, even though
3988 * the events themselves are enabled (and record nothing).
3989 */
3990 static __init int event_trace_enable_again(void)
3991 {
3992 struct trace_array *tr;
3993
3994 tr = top_trace_array();
3995 if (!tr)
3996 return -ENODEV;
3997
3998 early_enable_events(tr, bootup_event_buf, true);
3999
4000 return 0;
4001 }
4002
4003 early_initcall(event_trace_enable_again);
4004
4005 /* Init fields that are not related to the tracefs */
4006 static __init int event_trace_init_fields(void)
4007 {
4008 if (trace_define_generic_fields())
4009 pr_warn("tracing: Failed to allocate generic fields");
4010
4011 if (trace_define_common_fields())
4012 pr_warn("tracing: Failed to allocate common fields");
4013
4014 return 0;
4015 }
4016
4017 __init int event_trace_init(void)
4018 {
4019 struct trace_array *tr;
4020 int ret;
4021
4022 tr = top_trace_array();
4023 if (!tr)
4024 return -ENODEV;
4025
4026 trace_create_file("available_events", TRACE_MODE_READ,
4027 NULL, tr, &ftrace_avail_fops);
4028
4029 ret = early_event_add_tracer(NULL, tr);
4030 if (ret)
4031 return ret;
4032
4033 #ifdef CONFIG_MODULES
4034 ret = register_module_notifier(&trace_module_nb);
4035 if (ret)
4036 pr_warn("Failed to register trace events module notifier\n");
4037 #endif
4038
4039 eventdir_initialized = true;
4040
4041 return 0;
4042 }
4043
4044 void __init trace_event_init(void)
4045 {
4046 event_trace_memsetup();
4047 init_ftrace_syscalls();
4048 event_trace_enable();
4049 event_trace_init_fields();
4050 }
4051
4052 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
4053
4054 static DEFINE_SPINLOCK(test_spinlock);
4055 static DEFINE_SPINLOCK(test_spinlock_irq);
4056 static DEFINE_MUTEX(test_mutex);
4057
4058 static __init void test_work(struct work_struct *dummy)
4059 {
4060 spin_lock(&test_spinlock);
4061 spin_lock_irq(&test_spinlock_irq);
4062 udelay(1);
4063
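/* brief delay while both locks are held (and a sleep under the mutex below), to give lock and timer events something to record */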
spin_unlock_irq(&test_spinlock_irq); 4064 spin_unlock(&test_spinlock); 4065 4066 mutex_lock(&test_mutex); 4067 msleep(1); 4068 mutex_unlock(&test_mutex); 4069 } 4070 4071 static __init int event_test_thread(void *unused) 4072 { 4073 void *test_malloc; 4074 4075 test_malloc = kmalloc(1234, GFP_KERNEL); 4076 if (!test_malloc) 4077 pr_info("failed to kmalloc\n"); 4078 4079 schedule_on_each_cpu(test_work); 4080 4081 kfree(test_malloc); 4082 4083 set_current_state(TASK_INTERRUPTIBLE); 4084 while (!kthread_should_stop()) { 4085 schedule(); 4086 set_current_state(TASK_INTERRUPTIBLE); 4087 } 4088 __set_current_state(TASK_RUNNING); 4089 4090 return 0; 4091 } 4092 4093 /* 4094 * Do various things that may trigger events. 4095 */ 4096 static __init void event_test_stuff(void) 4097 { 4098 struct task_struct *test_thread; 4099 4100 test_thread = kthread_run(event_test_thread, NULL, "test-events"); 4101 msleep(1); 4102 kthread_stop(test_thread); 4103 } 4104 4105 /* 4106 * For every trace event defined, we will test each trace point separately, 4107 * and then by groups, and finally all trace points. 4108 */ 4109 static __init void event_trace_self_tests(void) 4110 { 4111 struct trace_subsystem_dir *dir; 4112 struct trace_event_file *file; 4113 struct trace_event_call *call; 4114 struct event_subsystem *system; 4115 struct trace_array *tr; 4116 int ret; 4117 4118 tr = top_trace_array(); 4119 if (!tr) 4120 return; 4121 4122 pr_info("Running tests on trace events:\n"); 4123 4124 list_for_each_entry(file, &tr->events, list) { 4125 4126 call = file->event_call; 4127 4128 /* Only test those that have a probe */ 4129 if (!call->class || !call->class->probe) 4130 continue; 4131 4132 /* 4133 * Testing syscall events here is pretty useless, but 4134 * we still do it if configured. But this is time consuming. 4135 * What we really need is a user thread to perform the 4136 * syscalls as we test. 4137 */ 4138 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 4139 if (call->class->system && 4140 strcmp(call->class->system, "syscalls") == 0) 4141 continue; 4142 #endif 4143 4144 pr_info("Testing event %s: ", trace_event_name(call)); 4145 4146 /* 4147 * If an event is already enabled, someone is using 4148 * it and the self test should not be on. 
4149 */ 4150 if (file->flags & EVENT_FILE_FL_ENABLED) { 4151 pr_warn("Enabled event during self test!\n"); 4152 WARN_ON_ONCE(1); 4153 continue; 4154 } 4155 4156 ftrace_event_enable_disable(file, 1); 4157 event_test_stuff(); 4158 ftrace_event_enable_disable(file, 0); 4159 4160 pr_cont("OK\n"); 4161 } 4162 4163 /* Now test at the sub system level */ 4164 4165 pr_info("Running tests on trace event systems:\n"); 4166 4167 list_for_each_entry(dir, &tr->systems, list) { 4168 4169 system = dir->subsystem; 4170 4171 /* the ftrace system is special, skip it */ 4172 if (strcmp(system->name, "ftrace") == 0) 4173 continue; 4174 4175 pr_info("Testing event system %s: ", system->name); 4176 4177 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); 4178 if (WARN_ON_ONCE(ret)) { 4179 pr_warn("error enabling system %s\n", 4180 system->name); 4181 continue; 4182 } 4183 4184 event_test_stuff(); 4185 4186 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); 4187 if (WARN_ON_ONCE(ret)) { 4188 pr_warn("error disabling system %s\n", 4189 system->name); 4190 continue; 4191 } 4192 4193 pr_cont("OK\n"); 4194 } 4195 4196 /* Test with all events enabled */ 4197 4198 pr_info("Running tests on all trace events:\n"); 4199 pr_info("Testing all events: "); 4200 4201 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); 4202 if (WARN_ON_ONCE(ret)) { 4203 pr_warn("error enabling all events\n"); 4204 return; 4205 } 4206 4207 event_test_stuff(); 4208 4209 /* reset sysname */ 4210 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); 4211 if (WARN_ON_ONCE(ret)) { 4212 pr_warn("error disabling all events\n"); 4213 return; 4214 } 4215 4216 pr_cont("OK\n"); 4217 } 4218 4219 #ifdef CONFIG_FUNCTION_TRACER 4220 4221 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 4222 4223 static struct trace_event_file event_trace_file __initdata; 4224 4225 static void __init 4226 function_test_events_call(unsigned long ip, unsigned long parent_ip, 4227 struct ftrace_ops *op, struct ftrace_regs *regs) 4228 { 4229 struct trace_buffer *buffer; 4230 struct ring_buffer_event *event; 4231 struct ftrace_entry *entry; 4232 unsigned int trace_ctx; 4233 long disabled; 4234 int cpu; 4235 4236 trace_ctx = tracing_gen_ctx(); 4237 preempt_disable_notrace(); 4238 cpu = raw_smp_processor_id(); 4239 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); 4240 4241 if (disabled != 1) 4242 goto out; 4243 4244 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, 4245 TRACE_FN, sizeof(*entry), 4246 trace_ctx); 4247 if (!event) 4248 goto out; 4249 entry = ring_buffer_event_data(event); 4250 entry->ip = ip; 4251 entry->parent_ip = parent_ip; 4252 4253 event_trigger_unlock_commit(&event_trace_file, buffer, event, 4254 entry, trace_ctx); 4255 out: 4256 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 4257 preempt_enable_notrace(); 4258 } 4259 4260 static struct ftrace_ops trace_ops __initdata = 4261 { 4262 .func = function_test_events_call, 4263 }; 4264 4265 static __init void event_trace_self_test_with_function(void) 4266 { 4267 int ret; 4268 4269 event_trace_file.tr = top_trace_array(); 4270 if (WARN_ON(!event_trace_file.tr)) 4271 return; 4272 4273 ret = register_ftrace_function(&trace_ops); 4274 if (WARN_ON(ret < 0)) { 4275 pr_info("Failed to enable function tracer for event tests\n"); 4276 return; 4277 } 4278 pr_info("Running tests again, along with the function tracer\n"); 4279 event_trace_self_tests(); 4280 unregister_ftrace_function(&trace_ops); 4281 } 4282 #else 4283 static __init void 
event_trace_self_test_with_function(void) 4284 { 4285 } 4286 #endif 4287 4288 static __init int event_trace_self_tests_init(void) 4289 { 4290 if (!tracing_selftest_disabled) { 4291 event_trace_self_tests(); 4292 event_trace_self_test_with_function(); 4293 } 4294 4295 return 0; 4296 } 4297 4298 late_initcall(event_trace_self_tests_init); 4299 4300 #endif 4301