1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * event tracer 4 * 5 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> 6 * 7 * - Added format output of fields of the trace point. 8 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. 9 * 10 */ 11 12 #define pr_fmt(fmt) fmt 13 14 #include <linux/workqueue.h> 15 #include <linux/security.h> 16 #include <linux/spinlock.h> 17 #include <linux/kthread.h> 18 #include <linux/tracefs.h> 19 #include <linux/uaccess.h> 20 #include <linux/module.h> 21 #include <linux/ctype.h> 22 #include <linux/sort.h> 23 #include <linux/slab.h> 24 #include <linux/delay.h> 25 26 #include <trace/events/sched.h> 27 #include <trace/syscall.h> 28 29 #include <asm/setup.h> 30 31 #include "trace_output.h" 32 33 #undef TRACE_SYSTEM 34 #define TRACE_SYSTEM "TRACE_SYSTEM" 35 36 DEFINE_MUTEX(event_mutex); 37 38 LIST_HEAD(ftrace_events); 39 static LIST_HEAD(ftrace_generic_fields); 40 static LIST_HEAD(ftrace_common_fields); 41 static bool eventdir_initialized; 42 43 static LIST_HEAD(module_strings); 44 45 struct module_string { 46 struct list_head next; 47 struct module *module; 48 char *str; 49 }; 50 51 #define GFP_TRACE (GFP_KERNEL | __GFP_ZERO) 52 53 static struct kmem_cache *field_cachep; 54 static struct kmem_cache *file_cachep; 55 56 static inline int system_refcount(struct event_subsystem *system) 57 { 58 return system->ref_count; 59 } 60 61 static int system_refcount_inc(struct event_subsystem *system) 62 { 63 return system->ref_count++; 64 } 65 66 static int system_refcount_dec(struct event_subsystem *system) 67 { 68 return --system->ref_count; 69 } 70 71 /* Double loops, do not use break, only goto's work */ 72 #define do_for_each_event_file(tr, file) \ 73 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 74 list_for_each_entry(file, &tr->events, list) 75 76 #define do_for_each_event_file_safe(tr, file) \ 77 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 78 struct trace_event_file *___n; \ 79 list_for_each_entry_safe(file, ___n, &tr->events, list) 80 81 #define while_for_each_event_file() \ 82 } 83 84 static struct ftrace_event_field * 85 __find_event_field(struct list_head *head, char *name) 86 { 87 struct ftrace_event_field *field; 88 89 list_for_each_entry(field, head, link) { 90 if (!strcmp(field->name, name)) 91 return field; 92 } 93 94 return NULL; 95 } 96 97 struct ftrace_event_field * 98 trace_find_event_field(struct trace_event_call *call, char *name) 99 { 100 struct ftrace_event_field *field; 101 struct list_head *head; 102 103 head = trace_get_fields(call); 104 field = __find_event_field(head, name); 105 if (field) 106 return field; 107 108 field = __find_event_field(&ftrace_generic_fields, name); 109 if (field) 110 return field; 111 112 return __find_event_field(&ftrace_common_fields, name); 113 } 114 115 static int __trace_define_field(struct list_head *head, const char *type, 116 const char *name, int offset, int size, 117 int is_signed, int filter_type, int len) 118 { 119 struct ftrace_event_field *field; 120 121 field = kmem_cache_alloc(field_cachep, GFP_TRACE); 122 if (!field) 123 return -ENOMEM; 124 125 field->name = name; 126 field->type = type; 127 128 if (filter_type == FILTER_OTHER) 129 field->filter_type = filter_assign_type(type); 130 else 131 field->filter_type = filter_type; 132 133 field->offset = offset; 134 field->size = size; 135 field->is_signed = is_signed; 136 field->len = len; 137 138 list_add(&field->link, head); 139 140 return 0; 141 } 142 143 int trace_define_field(struct trace_event_call *call, const char *type, 144 const char *name, int offset, int size, int is_signed, 145 int filter_type) 146 { 147 struct list_head *head; 148 149 if (WARN_ON(!call->class)) 150 return 0; 151 152 head = trace_get_fields(call); 153 return __trace_define_field(head, type, name, offset, size, 154 is_signed, filter_type, 0); 155 } 156 EXPORT_SYMBOL_GPL(trace_define_field); 157 158 int trace_define_field_ext(struct trace_event_call *call, const char *type, 159 const char *name, int offset, int size, int is_signed, 160 int filter_type, int len) 161 { 162 struct list_head *head; 163 164 if (WARN_ON(!call->class)) 165 return 0; 166 167 head = trace_get_fields(call); 168 return __trace_define_field(head, type, name, offset, size, 169 is_signed, filter_type, len); 170 } 171 172 #define __generic_field(type, item, filter_type) \ 173 ret = __trace_define_field(&ftrace_generic_fields, #type, \ 174 #item, 0, 0, is_signed_type(type), \ 175 filter_type, 0); \ 176 if (ret) \ 177 return ret; 178 179 #define __common_field(type, item) \ 180 ret = __trace_define_field(&ftrace_common_fields, #type, \ 181 "common_" #item, \ 182 offsetof(typeof(ent), item), \ 183 sizeof(ent.item), \ 184 is_signed_type(type), FILTER_OTHER, 0); \ 185 if (ret) \ 186 return ret; 187 188 static int trace_define_generic_fields(void) 189 { 190 int ret; 191 192 __generic_field(int, CPU, FILTER_CPU); 193 __generic_field(int, cpu, FILTER_CPU); 194 __generic_field(int, common_cpu, FILTER_CPU); 195 __generic_field(char *, COMM, FILTER_COMM); 196 __generic_field(char *, comm, FILTER_COMM); 197 198 return ret; 199 } 200 201 static int trace_define_common_fields(void) 202 { 203 int ret; 204 struct trace_entry ent; 205 206 __common_field(unsigned short, type); 207 __common_field(unsigned char, flags); 208 /* Holds both preempt_count and migrate_disable */ 209 __common_field(unsigned char, preempt_count); 210 __common_field(int, pid); 211 212 return ret; 213 } 214 215 static void trace_destroy_fields(struct trace_event_call *call) 216 { 217 struct ftrace_event_field *field, *next; 218 struct list_head *head; 219 220 head = trace_get_fields(call); 221 list_for_each_entry_safe(field, next, head, link) { 222 list_del(&field->link); 223 kmem_cache_free(field_cachep, field); 224 } 225 } 226 227 /* 228 * run-time version of trace_event_get_offsets_<call>() that returns the last 229 * accessible offset of trace fields excluding __dynamic_array bytes 230 */ 231 int trace_event_get_offsets(struct trace_event_call *call) 232 { 233 struct ftrace_event_field *tail; 234 struct list_head *head; 235 236 head = trace_get_fields(call); 237 /* 238 * head->next points to the last field with the largest offset, 239 * since it was added last by trace_define_field() 240 */ 241 tail = list_first_entry(head, struct ftrace_event_field, link); 242 return tail->offset + tail->size; 243 } 244 245 /* 246 * Check if the referenced field is an array and return true, 247 * as arrays are OK to dereference. 248 */ 249 static bool test_field(const char *fmt, struct trace_event_call *call) 250 { 251 struct trace_event_fields *field = call->class->fields_array; 252 const char *array_descriptor; 253 const char *p = fmt; 254 int len; 255 256 if (!(len = str_has_prefix(fmt, "REC->"))) 257 return false; 258 fmt += len; 259 for (p = fmt; *p; p++) { 260 if (!isalnum(*p) && *p != '_') 261 break; 262 } 263 len = p - fmt; 264 265 for (; field->type; field++) { 266 if (strncmp(field->name, fmt, len) || 267 field->name[len]) 268 continue; 269 array_descriptor = strchr(field->type, '['); 270 /* This is an array and is OK to dereference. */ 271 return array_descriptor != NULL; 272 } 273 return false; 274 } 275 276 /* 277 * Examine the print fmt of the event looking for unsafe dereference 278 * pointers using %p* that could be recorded in the trace event and 279 * much later referenced after the pointer was freed. Dereferencing 280 * pointers are OK, if it is dereferenced into the event itself. 281 */ 282 static void test_event_printk(struct trace_event_call *call) 283 { 284 u64 dereference_flags = 0; 285 bool first = true; 286 const char *fmt, *c, *r, *a; 287 int parens = 0; 288 char in_quote = 0; 289 int start_arg = 0; 290 int arg = 0; 291 int i; 292 293 fmt = call->print_fmt; 294 295 if (!fmt) 296 return; 297 298 for (i = 0; fmt[i]; i++) { 299 switch (fmt[i]) { 300 case '\\': 301 i++; 302 if (!fmt[i]) 303 return; 304 continue; 305 case '"': 306 case '\'': 307 /* 308 * The print fmt starts with a string that 309 * is processed first to find %p* usage, 310 * then after the first string, the print fmt 311 * contains arguments that are used to check 312 * if the dereferenced %p* usage is safe. 313 */ 314 if (first) { 315 if (fmt[i] == '\'') 316 continue; 317 if (in_quote) { 318 arg = 0; 319 first = false; 320 /* 321 * If there was no %p* uses 322 * the fmt is OK. 323 */ 324 if (!dereference_flags) 325 return; 326 } 327 } 328 if (in_quote) { 329 if (in_quote == fmt[i]) 330 in_quote = 0; 331 } else { 332 in_quote = fmt[i]; 333 } 334 continue; 335 case '%': 336 if (!first || !in_quote) 337 continue; 338 i++; 339 if (!fmt[i]) 340 return; 341 switch (fmt[i]) { 342 case '%': 343 continue; 344 case 'p': 345 /* Find dereferencing fields */ 346 switch (fmt[i + 1]) { 347 case 'B': case 'R': case 'r': 348 case 'b': case 'M': case 'm': 349 case 'I': case 'i': case 'E': 350 case 'U': case 'V': case 'N': 351 case 'a': case 'd': case 'D': 352 case 'g': case 't': case 'C': 353 case 'O': case 'f': 354 if (WARN_ONCE(arg == 63, 355 "Too many args for event: %s", 356 trace_event_name(call))) 357 return; 358 dereference_flags |= 1ULL << arg; 359 } 360 break; 361 default: 362 { 363 bool star = false; 364 int j; 365 366 /* Increment arg if %*s exists. */ 367 for (j = 0; fmt[i + j]; j++) { 368 if (isdigit(fmt[i + j]) || 369 fmt[i + j] == '.') 370 continue; 371 if (fmt[i + j] == '*') { 372 star = true; 373 continue; 374 } 375 if ((fmt[i + j] == 's') && star) 376 arg++; 377 break; 378 } 379 break; 380 } /* default */ 381 382 } /* switch */ 383 arg++; 384 continue; 385 case '(': 386 if (in_quote) 387 continue; 388 parens++; 389 continue; 390 case ')': 391 if (in_quote) 392 continue; 393 parens--; 394 if (WARN_ONCE(parens < 0, 395 "Paren mismatch for event: %s\narg='%s'\n%*s", 396 trace_event_name(call), 397 fmt + start_arg, 398 (i - start_arg) + 5, "^")) 399 return; 400 continue; 401 case ',': 402 if (in_quote || parens) 403 continue; 404 i++; 405 while (isspace(fmt[i])) 406 i++; 407 start_arg = i; 408 if (!(dereference_flags & (1ULL << arg))) 409 goto next_arg; 410 411 /* Find the REC-> in the argument */ 412 c = strchr(fmt + i, ','); 413 r = strstr(fmt + i, "REC->"); 414 if (r && (!c || r < c)) { 415 /* 416 * Addresses of events on the buffer, 417 * or an array on the buffer is 418 * OK to dereference. 419 * There's ways to fool this, but 420 * this is to catch common mistakes, 421 * not malicious code. 422 */ 423 a = strchr(fmt + i, '&'); 424 if ((a && (a < r)) || test_field(r, call)) 425 dereference_flags &= ~(1ULL << arg); 426 } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) && 427 (!c || r < c)) { 428 dereference_flags &= ~(1ULL << arg); 429 } else if ((r = strstr(fmt + i, "__get_sockaddr(")) && 430 (!c || r < c)) { 431 dereference_flags &= ~(1ULL << arg); 432 } 433 434 next_arg: 435 i--; 436 arg++; 437 } 438 } 439 440 /* 441 * If you triggered the below warning, the trace event reported 442 * uses an unsafe dereference pointer %p*. As the data stored 443 * at the trace event time may no longer exist when the trace 444 * event is printed, dereferencing to the original source is 445 * unsafe. The source of the dereference must be copied into the 446 * event itself, and the dereference must access the copy instead. 447 */ 448 if (WARN_ON_ONCE(dereference_flags)) { 449 arg = 1; 450 while (!(dereference_flags & 1)) { 451 dereference_flags >>= 1; 452 arg++; 453 } 454 pr_warn("event %s has unsafe dereference of argument %d\n", 455 trace_event_name(call), arg); 456 pr_warn("print_fmt: %s\n", fmt); 457 } 458 } 459 460 int trace_event_raw_init(struct trace_event_call *call) 461 { 462 int id; 463 464 id = register_trace_event(&call->event); 465 if (!id) 466 return -ENODEV; 467 468 test_event_printk(call); 469 470 return 0; 471 } 472 EXPORT_SYMBOL_GPL(trace_event_raw_init); 473 474 bool trace_event_ignore_this_pid(struct trace_event_file *trace_file) 475 { 476 struct trace_array *tr = trace_file->tr; 477 struct trace_array_cpu *data; 478 struct trace_pid_list *no_pid_list; 479 struct trace_pid_list *pid_list; 480 481 pid_list = rcu_dereference_raw(tr->filtered_pids); 482 no_pid_list = rcu_dereference_raw(tr->filtered_no_pids); 483 484 if (!pid_list && !no_pid_list) 485 return false; 486 487 data = this_cpu_ptr(tr->array_buffer.data); 488 489 return data->ignore_pid; 490 } 491 EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid); 492 493 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, 494 struct trace_event_file *trace_file, 495 unsigned long len) 496 { 497 struct trace_event_call *event_call = trace_file->event_call; 498 499 if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) && 500 trace_event_ignore_this_pid(trace_file)) 501 return NULL; 502 503 /* 504 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables 505 * preemption (adding one to the preempt_count). Since we are 506 * interested in the preempt_count at the time the tracepoint was 507 * hit, we need to subtract one to offset the increment. 508 */ 509 fbuffer->trace_ctx = tracing_gen_ctx_dec(); 510 fbuffer->trace_file = trace_file; 511 512 fbuffer->event = 513 trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, 514 event_call->event.type, len, 515 fbuffer->trace_ctx); 516 if (!fbuffer->event) 517 return NULL; 518 519 fbuffer->regs = NULL; 520 fbuffer->entry = ring_buffer_event_data(fbuffer->event); 521 return fbuffer->entry; 522 } 523 EXPORT_SYMBOL_GPL(trace_event_buffer_reserve); 524 525 int trace_event_reg(struct trace_event_call *call, 526 enum trace_reg type, void *data) 527 { 528 struct trace_event_file *file = data; 529 530 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); 531 switch (type) { 532 case TRACE_REG_REGISTER: 533 return tracepoint_probe_register(call->tp, 534 call->class->probe, 535 file); 536 case TRACE_REG_UNREGISTER: 537 tracepoint_probe_unregister(call->tp, 538 call->class->probe, 539 file); 540 return 0; 541 542 #ifdef CONFIG_PERF_EVENTS 543 case TRACE_REG_PERF_REGISTER: 544 return tracepoint_probe_register(call->tp, 545 call->class->perf_probe, 546 call); 547 case TRACE_REG_PERF_UNREGISTER: 548 tracepoint_probe_unregister(call->tp, 549 call->class->perf_probe, 550 call); 551 return 0; 552 case TRACE_REG_PERF_OPEN: 553 case TRACE_REG_PERF_CLOSE: 554 case TRACE_REG_PERF_ADD: 555 case TRACE_REG_PERF_DEL: 556 return 0; 557 #endif 558 } 559 return 0; 560 } 561 EXPORT_SYMBOL_GPL(trace_event_reg); 562 563 void trace_event_enable_cmd_record(bool enable) 564 { 565 struct trace_event_file *file; 566 struct trace_array *tr; 567 568 lockdep_assert_held(&event_mutex); 569 570 do_for_each_event_file(tr, file) { 571 572 if (!(file->flags & EVENT_FILE_FL_ENABLED)) 573 continue; 574 575 if (enable) { 576 tracing_start_cmdline_record(); 577 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 578 } else { 579 tracing_stop_cmdline_record(); 580 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 581 } 582 } while_for_each_event_file(); 583 } 584 585 void trace_event_enable_tgid_record(bool enable) 586 { 587 struct trace_event_file *file; 588 struct trace_array *tr; 589 590 lockdep_assert_held(&event_mutex); 591 592 do_for_each_event_file(tr, file) { 593 if (!(file->flags & EVENT_FILE_FL_ENABLED)) 594 continue; 595 596 if (enable) { 597 tracing_start_tgid_record(); 598 set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 599 } else { 600 tracing_stop_tgid_record(); 601 clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, 602 &file->flags); 603 } 604 } while_for_each_event_file(); 605 } 606 607 static int __ftrace_event_enable_disable(struct trace_event_file *file, 608 int enable, int soft_disable) 609 { 610 struct trace_event_call *call = file->event_call; 611 struct trace_array *tr = file->tr; 612 unsigned long file_flags = file->flags; 613 int ret = 0; 614 int disable; 615 616 switch (enable) { 617 case 0: 618 /* 619 * When soft_disable is set and enable is cleared, the sm_ref 620 * reference counter is decremented. If it reaches 0, we want 621 * to clear the SOFT_DISABLED flag but leave the event in the 622 * state that it was. That is, if the event was enabled and 623 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED 624 * is set we do not want the event to be enabled before we 625 * clear the bit. 626 * 627 * When soft_disable is not set but the SOFT_MODE flag is, 628 * we do nothing. Do not disable the tracepoint, otherwise 629 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 630 */ 631 if (soft_disable) { 632 if (atomic_dec_return(&file->sm_ref) > 0) 633 break; 634 disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; 635 clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 636 } else 637 disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); 638 639 if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) { 640 clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 641 if (file->flags & EVENT_FILE_FL_RECORDED_CMD) { 642 tracing_stop_cmdline_record(); 643 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 644 } 645 646 if (file->flags & EVENT_FILE_FL_RECORDED_TGID) { 647 tracing_stop_tgid_record(); 648 clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 649 } 650 651 call->class->reg(call, TRACE_REG_UNREGISTER, file); 652 } 653 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 654 if (file->flags & EVENT_FILE_FL_SOFT_MODE) 655 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 656 else 657 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 658 break; 659 case 1: 660 /* 661 * When soft_disable is set and enable is set, we want to 662 * register the tracepoint for the event, but leave the event 663 * as is. That means, if the event was already enabled, we do 664 * nothing (but set SOFT_MODE). If the event is disabled, we 665 * set SOFT_DISABLED before enabling the event tracepoint, so 666 * it still seems to be disabled. 667 */ 668 if (!soft_disable) 669 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 670 else { 671 if (atomic_inc_return(&file->sm_ref) > 1) 672 break; 673 set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 674 } 675 676 if (!(file->flags & EVENT_FILE_FL_ENABLED)) { 677 bool cmd = false, tgid = false; 678 679 /* Keep the event disabled, when going to SOFT_MODE. */ 680 if (soft_disable) 681 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 682 683 if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { 684 cmd = true; 685 tracing_start_cmdline_record(); 686 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 687 } 688 689 if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { 690 tgid = true; 691 tracing_start_tgid_record(); 692 set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 693 } 694 695 ret = call->class->reg(call, TRACE_REG_REGISTER, file); 696 if (ret) { 697 if (cmd) 698 tracing_stop_cmdline_record(); 699 if (tgid) 700 tracing_stop_tgid_record(); 701 pr_info("event trace: Could not enable event " 702 "%s\n", trace_event_name(call)); 703 break; 704 } 705 set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 706 707 /* WAS_ENABLED gets set but never cleared. */ 708 set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags); 709 } 710 break; 711 } 712 713 /* Enable or disable use of trace_buffered_event */ 714 if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) != 715 (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) { 716 if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) 717 trace_buffered_event_enable(); 718 else 719 trace_buffered_event_disable(); 720 } 721 722 return ret; 723 } 724 725 int trace_event_enable_disable(struct trace_event_file *file, 726 int enable, int soft_disable) 727 { 728 return __ftrace_event_enable_disable(file, enable, soft_disable); 729 } 730 731 static int ftrace_event_enable_disable(struct trace_event_file *file, 732 int enable) 733 { 734 return __ftrace_event_enable_disable(file, enable, 0); 735 } 736 737 static void ftrace_clear_events(struct trace_array *tr) 738 { 739 struct trace_event_file *file; 740 741 mutex_lock(&event_mutex); 742 list_for_each_entry(file, &tr->events, list) { 743 ftrace_event_enable_disable(file, 0); 744 } 745 mutex_unlock(&event_mutex); 746 } 747 748 static void 749 event_filter_pid_sched_process_exit(void *data, struct task_struct *task) 750 { 751 struct trace_pid_list *pid_list; 752 struct trace_array *tr = data; 753 754 pid_list = rcu_dereference_raw(tr->filtered_pids); 755 trace_filter_add_remove_task(pid_list, NULL, task); 756 757 pid_list = rcu_dereference_raw(tr->filtered_no_pids); 758 trace_filter_add_remove_task(pid_list, NULL, task); 759 } 760 761 static void 762 event_filter_pid_sched_process_fork(void *data, 763 struct task_struct *self, 764 struct task_struct *task) 765 { 766 struct trace_pid_list *pid_list; 767 struct trace_array *tr = data; 768 769 pid_list = rcu_dereference_sched(tr->filtered_pids); 770 trace_filter_add_remove_task(pid_list, self, task); 771 772 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 773 trace_filter_add_remove_task(pid_list, self, task); 774 } 775 776 void trace_event_follow_fork(struct trace_array *tr, bool enable) 777 { 778 if (enable) { 779 register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, 780 tr, INT_MIN); 781 register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, 782 tr, INT_MAX); 783 } else { 784 unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, 785 tr); 786 unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, 787 tr); 788 } 789 } 790 791 static void 792 event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, 793 struct task_struct *prev, 794 struct task_struct *next, 795 unsigned int prev_state) 796 { 797 struct trace_array *tr = data; 798 struct trace_pid_list *no_pid_list; 799 struct trace_pid_list *pid_list; 800 bool ret; 801 802 pid_list = rcu_dereference_sched(tr->filtered_pids); 803 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 804 805 /* 806 * Sched switch is funny, as we only want to ignore it 807 * in the notrace case if both prev and next should be ignored. 808 */ 809 ret = trace_ignore_this_task(NULL, no_pid_list, prev) && 810 trace_ignore_this_task(NULL, no_pid_list, next); 811 812 this_cpu_write(tr->array_buffer.data->ignore_pid, ret || 813 (trace_ignore_this_task(pid_list, NULL, prev) && 814 trace_ignore_this_task(pid_list, NULL, next))); 815 } 816 817 static void 818 event_filter_pid_sched_switch_probe_post(void *data, bool preempt, 819 struct task_struct *prev, 820 struct task_struct *next, 821 unsigned int prev_state) 822 { 823 struct trace_array *tr = data; 824 struct trace_pid_list *no_pid_list; 825 struct trace_pid_list *pid_list; 826 827 pid_list = rcu_dereference_sched(tr->filtered_pids); 828 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 829 830 this_cpu_write(tr->array_buffer.data->ignore_pid, 831 trace_ignore_this_task(pid_list, no_pid_list, next)); 832 } 833 834 static void 835 event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) 836 { 837 struct trace_array *tr = data; 838 struct trace_pid_list *no_pid_list; 839 struct trace_pid_list *pid_list; 840 841 /* Nothing to do if we are already tracing */ 842 if (!this_cpu_read(tr->array_buffer.data->ignore_pid)) 843 return; 844 845 pid_list = rcu_dereference_sched(tr->filtered_pids); 846 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 847 848 this_cpu_write(tr->array_buffer.data->ignore_pid, 849 trace_ignore_this_task(pid_list, no_pid_list, task)); 850 } 851 852 static void 853 event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) 854 { 855 struct trace_array *tr = data; 856 struct trace_pid_list *no_pid_list; 857 struct trace_pid_list *pid_list; 858 859 /* Nothing to do if we are not tracing */ 860 if (this_cpu_read(tr->array_buffer.data->ignore_pid)) 861 return; 862 863 pid_list = rcu_dereference_sched(tr->filtered_pids); 864 no_pid_list = rcu_dereference_sched(tr->filtered_no_pids); 865 866 /* Set tracing if current is enabled */ 867 this_cpu_write(tr->array_buffer.data->ignore_pid, 868 trace_ignore_this_task(pid_list, no_pid_list, current)); 869 } 870 871 static void unregister_pid_events(struct trace_array *tr) 872 { 873 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); 874 unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); 875 876 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); 877 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); 878 879 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); 880 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); 881 882 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); 883 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); 884 } 885 886 static void __ftrace_clear_event_pids(struct trace_array *tr, int type) 887 { 888 struct trace_pid_list *pid_list; 889 struct trace_pid_list *no_pid_list; 890 struct trace_event_file *file; 891 int cpu; 892 893 pid_list = rcu_dereference_protected(tr->filtered_pids, 894 lockdep_is_held(&event_mutex)); 895 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 896 lockdep_is_held(&event_mutex)); 897 898 /* Make sure there's something to do */ 899 if (!pid_type_enabled(type, pid_list, no_pid_list)) 900 return; 901 902 if (!still_need_pid_events(type, pid_list, no_pid_list)) { 903 unregister_pid_events(tr); 904 905 list_for_each_entry(file, &tr->events, list) { 906 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 907 } 908 909 for_each_possible_cpu(cpu) 910 per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false; 911 } 912 913 if (type & TRACE_PIDS) 914 rcu_assign_pointer(tr->filtered_pids, NULL); 915 916 if (type & TRACE_NO_PIDS) 917 rcu_assign_pointer(tr->filtered_no_pids, NULL); 918 919 /* Wait till all users are no longer using pid filtering */ 920 tracepoint_synchronize_unregister(); 921 922 if ((type & TRACE_PIDS) && pid_list) 923 trace_pid_list_free(pid_list); 924 925 if ((type & TRACE_NO_PIDS) && no_pid_list) 926 trace_pid_list_free(no_pid_list); 927 } 928 929 static void ftrace_clear_event_pids(struct trace_array *tr, int type) 930 { 931 mutex_lock(&event_mutex); 932 __ftrace_clear_event_pids(tr, type); 933 mutex_unlock(&event_mutex); 934 } 935 936 static void __put_system(struct event_subsystem *system) 937 { 938 struct event_filter *filter = system->filter; 939 940 WARN_ON_ONCE(system_refcount(system) == 0); 941 if (system_refcount_dec(system)) 942 return; 943 944 list_del(&system->list); 945 946 if (filter) { 947 kfree(filter->filter_string); 948 kfree(filter); 949 } 950 kfree_const(system->name); 951 kfree(system); 952 } 953 954 static void __get_system(struct event_subsystem *system) 955 { 956 WARN_ON_ONCE(system_refcount(system) == 0); 957 system_refcount_inc(system); 958 } 959 960 static void __get_system_dir(struct trace_subsystem_dir *dir) 961 { 962 WARN_ON_ONCE(dir->ref_count == 0); 963 dir->ref_count++; 964 __get_system(dir->subsystem); 965 } 966 967 static void __put_system_dir(struct trace_subsystem_dir *dir) 968 { 969 WARN_ON_ONCE(dir->ref_count == 0); 970 /* If the subsystem is about to be freed, the dir must be too */ 971 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); 972 973 __put_system(dir->subsystem); 974 if (!--dir->ref_count) 975 kfree(dir); 976 } 977 978 static void put_system(struct trace_subsystem_dir *dir) 979 { 980 mutex_lock(&event_mutex); 981 __put_system_dir(dir); 982 mutex_unlock(&event_mutex); 983 } 984 985 static void remove_subsystem(struct trace_subsystem_dir *dir) 986 { 987 if (!dir) 988 return; 989 990 if (!--dir->nr_events) { 991 tracefs_remove(dir->entry); 992 list_del(&dir->list); 993 __put_system_dir(dir); 994 } 995 } 996 997 static void remove_event_file_dir(struct trace_event_file *file) 998 { 999 struct dentry *dir = file->dir; 1000 struct dentry *child; 1001 1002 if (dir) { 1003 spin_lock(&dir->d_lock); /* probably unneeded */ 1004 list_for_each_entry(child, &dir->d_subdirs, d_child) { 1005 if (d_really_is_positive(child)) /* probably unneeded */ 1006 d_inode(child)->i_private = NULL; 1007 } 1008 spin_unlock(&dir->d_lock); 1009 1010 tracefs_remove(dir); 1011 } 1012 1013 list_del(&file->list); 1014 remove_subsystem(file->system); 1015 free_event_filter(file->filter); 1016 kmem_cache_free(file_cachep, file); 1017 } 1018 1019 /* 1020 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. 1021 */ 1022 static int 1023 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, 1024 const char *sub, const char *event, int set) 1025 { 1026 struct trace_event_file *file; 1027 struct trace_event_call *call; 1028 const char *name; 1029 int ret = -EINVAL; 1030 int eret = 0; 1031 1032 list_for_each_entry(file, &tr->events, list) { 1033 1034 call = file->event_call; 1035 name = trace_event_name(call); 1036 1037 if (!name || !call->class || !call->class->reg) 1038 continue; 1039 1040 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 1041 continue; 1042 1043 if (match && 1044 strcmp(match, name) != 0 && 1045 strcmp(match, call->class->system) != 0) 1046 continue; 1047 1048 if (sub && strcmp(sub, call->class->system) != 0) 1049 continue; 1050 1051 if (event && strcmp(event, name) != 0) 1052 continue; 1053 1054 ret = ftrace_event_enable_disable(file, set); 1055 1056 /* 1057 * Save the first error and return that. Some events 1058 * may still have been enabled, but let the user 1059 * know that something went wrong. 1060 */ 1061 if (ret && !eret) 1062 eret = ret; 1063 1064 ret = eret; 1065 } 1066 1067 return ret; 1068 } 1069 1070 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, 1071 const char *sub, const char *event, int set) 1072 { 1073 int ret; 1074 1075 mutex_lock(&event_mutex); 1076 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); 1077 mutex_unlock(&event_mutex); 1078 1079 return ret; 1080 } 1081 1082 int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 1083 { 1084 char *event = NULL, *sub = NULL, *match; 1085 int ret; 1086 1087 if (!tr) 1088 return -ENOENT; 1089 /* 1090 * The buf format can be <subsystem>:<event-name> 1091 * *:<event-name> means any event by that name. 1092 * :<event-name> is the same. 1093 * 1094 * <subsystem>:* means all events in that subsystem 1095 * <subsystem>: means the same. 1096 * 1097 * <name> (no ':') means all events in a subsystem with 1098 * the name <name> or any event that matches <name> 1099 */ 1100 1101 match = strsep(&buf, ":"); 1102 if (buf) { 1103 sub = match; 1104 event = buf; 1105 match = NULL; 1106 1107 if (!strlen(sub) || strcmp(sub, "*") == 0) 1108 sub = NULL; 1109 if (!strlen(event) || strcmp(event, "*") == 0) 1110 event = NULL; 1111 } 1112 1113 ret = __ftrace_set_clr_event(tr, match, sub, event, set); 1114 1115 /* Put back the colon to allow this to be called again */ 1116 if (buf) 1117 *(buf - 1) = ':'; 1118 1119 return ret; 1120 } 1121 1122 /** 1123 * trace_set_clr_event - enable or disable an event 1124 * @system: system name to match (NULL for any system) 1125 * @event: event name to match (NULL for all events, within system) 1126 * @set: 1 to enable, 0 to disable 1127 * 1128 * This is a way for other parts of the kernel to enable or disable 1129 * event recording. 1130 * 1131 * Returns 0 on success, -EINVAL if the parameters do not match any 1132 * registered events. 1133 */ 1134 int trace_set_clr_event(const char *system, const char *event, int set) 1135 { 1136 struct trace_array *tr = top_trace_array(); 1137 1138 if (!tr) 1139 return -ENODEV; 1140 1141 return __ftrace_set_clr_event(tr, NULL, system, event, set); 1142 } 1143 EXPORT_SYMBOL_GPL(trace_set_clr_event); 1144 1145 /** 1146 * trace_array_set_clr_event - enable or disable an event for a trace array. 1147 * @tr: concerned trace array. 1148 * @system: system name to match (NULL for any system) 1149 * @event: event name to match (NULL for all events, within system) 1150 * @enable: true to enable, false to disable 1151 * 1152 * This is a way for other parts of the kernel to enable or disable 1153 * event recording. 1154 * 1155 * Returns 0 on success, -EINVAL if the parameters do not match any 1156 * registered events. 1157 */ 1158 int trace_array_set_clr_event(struct trace_array *tr, const char *system, 1159 const char *event, bool enable) 1160 { 1161 int set; 1162 1163 if (!tr) 1164 return -ENOENT; 1165 1166 set = (enable == true) ? 1 : 0; 1167 return __ftrace_set_clr_event(tr, NULL, system, event, set); 1168 } 1169 EXPORT_SYMBOL_GPL(trace_array_set_clr_event); 1170 1171 /* 128 should be much more than enough */ 1172 #define EVENT_BUF_SIZE 127 1173 1174 static ssize_t 1175 ftrace_event_write(struct file *file, const char __user *ubuf, 1176 size_t cnt, loff_t *ppos) 1177 { 1178 struct trace_parser parser; 1179 struct seq_file *m = file->private_data; 1180 struct trace_array *tr = m->private; 1181 ssize_t read, ret; 1182 1183 if (!cnt) 1184 return 0; 1185 1186 ret = tracing_update_buffers(); 1187 if (ret < 0) 1188 return ret; 1189 1190 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) 1191 return -ENOMEM; 1192 1193 read = trace_get_user(&parser, ubuf, cnt, ppos); 1194 1195 if (read >= 0 && trace_parser_loaded((&parser))) { 1196 int set = 1; 1197 1198 if (*parser.buffer == '!') 1199 set = 0; 1200 1201 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); 1202 if (ret) 1203 goto out_put; 1204 } 1205 1206 ret = read; 1207 1208 out_put: 1209 trace_parser_put(&parser); 1210 1211 return ret; 1212 } 1213 1214 static void * 1215 t_next(struct seq_file *m, void *v, loff_t *pos) 1216 { 1217 struct trace_event_file *file = v; 1218 struct trace_event_call *call; 1219 struct trace_array *tr = m->private; 1220 1221 (*pos)++; 1222 1223 list_for_each_entry_continue(file, &tr->events, list) { 1224 call = file->event_call; 1225 /* 1226 * The ftrace subsystem is for showing formats only. 1227 * They can not be enabled or disabled via the event files. 1228 */ 1229 if (call->class && call->class->reg && 1230 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1231 return file; 1232 } 1233 1234 return NULL; 1235 } 1236 1237 static void *t_start(struct seq_file *m, loff_t *pos) 1238 { 1239 struct trace_event_file *file; 1240 struct trace_array *tr = m->private; 1241 loff_t l; 1242 1243 mutex_lock(&event_mutex); 1244 1245 file = list_entry(&tr->events, struct trace_event_file, list); 1246 for (l = 0; l <= *pos; ) { 1247 file = t_next(m, file, &l); 1248 if (!file) 1249 break; 1250 } 1251 return file; 1252 } 1253 1254 static void * 1255 s_next(struct seq_file *m, void *v, loff_t *pos) 1256 { 1257 struct trace_event_file *file = v; 1258 struct trace_array *tr = m->private; 1259 1260 (*pos)++; 1261 1262 list_for_each_entry_continue(file, &tr->events, list) { 1263 if (file->flags & EVENT_FILE_FL_ENABLED) 1264 return file; 1265 } 1266 1267 return NULL; 1268 } 1269 1270 static void *s_start(struct seq_file *m, loff_t *pos) 1271 { 1272 struct trace_event_file *file; 1273 struct trace_array *tr = m->private; 1274 loff_t l; 1275 1276 mutex_lock(&event_mutex); 1277 1278 file = list_entry(&tr->events, struct trace_event_file, list); 1279 for (l = 0; l <= *pos; ) { 1280 file = s_next(m, file, &l); 1281 if (!file) 1282 break; 1283 } 1284 return file; 1285 } 1286 1287 static int t_show(struct seq_file *m, void *v) 1288 { 1289 struct trace_event_file *file = v; 1290 struct trace_event_call *call = file->event_call; 1291 1292 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 1293 seq_printf(m, "%s:", call->class->system); 1294 seq_printf(m, "%s\n", trace_event_name(call)); 1295 1296 return 0; 1297 } 1298 1299 static void t_stop(struct seq_file *m, void *p) 1300 { 1301 mutex_unlock(&event_mutex); 1302 } 1303 1304 static void * 1305 __next(struct seq_file *m, void *v, loff_t *pos, int type) 1306 { 1307 struct trace_array *tr = m->private; 1308 struct trace_pid_list *pid_list; 1309 1310 if (type == TRACE_PIDS) 1311 pid_list = rcu_dereference_sched(tr->filtered_pids); 1312 else 1313 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 1314 1315 return trace_pid_next(pid_list, v, pos); 1316 } 1317 1318 static void * 1319 p_next(struct seq_file *m, void *v, loff_t *pos) 1320 { 1321 return __next(m, v, pos, TRACE_PIDS); 1322 } 1323 1324 static void * 1325 np_next(struct seq_file *m, void *v, loff_t *pos) 1326 { 1327 return __next(m, v, pos, TRACE_NO_PIDS); 1328 } 1329 1330 static void *__start(struct seq_file *m, loff_t *pos, int type) 1331 __acquires(RCU) 1332 { 1333 struct trace_pid_list *pid_list; 1334 struct trace_array *tr = m->private; 1335 1336 /* 1337 * Grab the mutex, to keep calls to p_next() having the same 1338 * tr->filtered_pids as p_start() has. 1339 * If we just passed the tr->filtered_pids around, then RCU would 1340 * have been enough, but doing that makes things more complex. 1341 */ 1342 mutex_lock(&event_mutex); 1343 rcu_read_lock_sched(); 1344 1345 if (type == TRACE_PIDS) 1346 pid_list = rcu_dereference_sched(tr->filtered_pids); 1347 else 1348 pid_list = rcu_dereference_sched(tr->filtered_no_pids); 1349 1350 if (!pid_list) 1351 return NULL; 1352 1353 return trace_pid_start(pid_list, pos); 1354 } 1355 1356 static void *p_start(struct seq_file *m, loff_t *pos) 1357 __acquires(RCU) 1358 { 1359 return __start(m, pos, TRACE_PIDS); 1360 } 1361 1362 static void *np_start(struct seq_file *m, loff_t *pos) 1363 __acquires(RCU) 1364 { 1365 return __start(m, pos, TRACE_NO_PIDS); 1366 } 1367 1368 static void p_stop(struct seq_file *m, void *p) 1369 __releases(RCU) 1370 { 1371 rcu_read_unlock_sched(); 1372 mutex_unlock(&event_mutex); 1373 } 1374 1375 static ssize_t 1376 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 1377 loff_t *ppos) 1378 { 1379 struct trace_event_file *file; 1380 unsigned long flags; 1381 char buf[4] = "0"; 1382 1383 mutex_lock(&event_mutex); 1384 file = event_file_data(filp); 1385 if (likely(file)) 1386 flags = file->flags; 1387 mutex_unlock(&event_mutex); 1388 1389 if (!file) 1390 return -ENODEV; 1391 1392 if (flags & EVENT_FILE_FL_ENABLED && 1393 !(flags & EVENT_FILE_FL_SOFT_DISABLED)) 1394 strcpy(buf, "1"); 1395 1396 if (flags & EVENT_FILE_FL_SOFT_DISABLED || 1397 flags & EVENT_FILE_FL_SOFT_MODE) 1398 strcat(buf, "*"); 1399 1400 strcat(buf, "\n"); 1401 1402 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); 1403 } 1404 1405 static ssize_t 1406 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 1407 loff_t *ppos) 1408 { 1409 struct trace_event_file *file; 1410 unsigned long val; 1411 int ret; 1412 1413 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 1414 if (ret) 1415 return ret; 1416 1417 ret = tracing_update_buffers(); 1418 if (ret < 0) 1419 return ret; 1420 1421 switch (val) { 1422 case 0: 1423 case 1: 1424 ret = -ENODEV; 1425 mutex_lock(&event_mutex); 1426 file = event_file_data(filp); 1427 if (likely(file)) 1428 ret = ftrace_event_enable_disable(file, val); 1429 mutex_unlock(&event_mutex); 1430 break; 1431 1432 default: 1433 return -EINVAL; 1434 } 1435 1436 *ppos += cnt; 1437 1438 return ret ? ret : cnt; 1439 } 1440 1441 static ssize_t 1442 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 1443 loff_t *ppos) 1444 { 1445 const char set_to_char[4] = { '?', '0', '1', 'X' }; 1446 struct trace_subsystem_dir *dir = filp->private_data; 1447 struct event_subsystem *system = dir->subsystem; 1448 struct trace_event_call *call; 1449 struct trace_event_file *file; 1450 struct trace_array *tr = dir->tr; 1451 char buf[2]; 1452 int set = 0; 1453 int ret; 1454 1455 mutex_lock(&event_mutex); 1456 list_for_each_entry(file, &tr->events, list) { 1457 call = file->event_call; 1458 if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) || 1459 !trace_event_name(call) || !call->class || !call->class->reg) 1460 continue; 1461 1462 if (system && strcmp(call->class->system, system->name) != 0) 1463 continue; 1464 1465 /* 1466 * We need to find out if all the events are set 1467 * or if all events or cleared, or if we have 1468 * a mixture. 1469 */ 1470 set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED)); 1471 1472 /* 1473 * If we have a mixture, no need to look further. 1474 */ 1475 if (set == 3) 1476 break; 1477 } 1478 mutex_unlock(&event_mutex); 1479 1480 buf[0] = set_to_char[set]; 1481 buf[1] = '\n'; 1482 1483 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 1484 1485 return ret; 1486 } 1487 1488 static ssize_t 1489 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 1490 loff_t *ppos) 1491 { 1492 struct trace_subsystem_dir *dir = filp->private_data; 1493 struct event_subsystem *system = dir->subsystem; 1494 const char *name = NULL; 1495 unsigned long val; 1496 ssize_t ret; 1497 1498 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 1499 if (ret) 1500 return ret; 1501 1502 ret = tracing_update_buffers(); 1503 if (ret < 0) 1504 return ret; 1505 1506 if (val != 0 && val != 1) 1507 return -EINVAL; 1508 1509 /* 1510 * Opening of "enable" adds a ref count to system, 1511 * so the name is safe to use. 1512 */ 1513 if (system) 1514 name = system->name; 1515 1516 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); 1517 if (ret) 1518 goto out; 1519 1520 ret = cnt; 1521 1522 out: 1523 *ppos += cnt; 1524 1525 return ret; 1526 } 1527 1528 enum { 1529 FORMAT_HEADER = 1, 1530 FORMAT_FIELD_SEPERATOR = 2, 1531 FORMAT_PRINTFMT = 3, 1532 }; 1533 1534 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 1535 { 1536 struct trace_event_call *call = event_file_data(m->private); 1537 struct list_head *common_head = &ftrace_common_fields; 1538 struct list_head *head = trace_get_fields(call); 1539 struct list_head *node = v; 1540 1541 (*pos)++; 1542 1543 switch ((unsigned long)v) { 1544 case FORMAT_HEADER: 1545 node = common_head; 1546 break; 1547 1548 case FORMAT_FIELD_SEPERATOR: 1549 node = head; 1550 break; 1551 1552 case FORMAT_PRINTFMT: 1553 /* all done */ 1554 return NULL; 1555 } 1556 1557 node = node->prev; 1558 if (node == common_head) 1559 return (void *)FORMAT_FIELD_SEPERATOR; 1560 else if (node == head) 1561 return (void *)FORMAT_PRINTFMT; 1562 else 1563 return node; 1564 } 1565 1566 static int f_show(struct seq_file *m, void *v) 1567 { 1568 struct trace_event_call *call = event_file_data(m->private); 1569 struct ftrace_event_field *field; 1570 const char *array_descriptor; 1571 1572 switch ((unsigned long)v) { 1573 case FORMAT_HEADER: 1574 seq_printf(m, "name: %s\n", trace_event_name(call)); 1575 seq_printf(m, "ID: %d\n", call->event.type); 1576 seq_puts(m, "format:\n"); 1577 return 0; 1578 1579 case FORMAT_FIELD_SEPERATOR: 1580 seq_putc(m, '\n'); 1581 return 0; 1582 1583 case FORMAT_PRINTFMT: 1584 seq_printf(m, "\nprint fmt: %s\n", 1585 call->print_fmt); 1586 return 0; 1587 } 1588 1589 field = list_entry(v, struct ftrace_event_field, link); 1590 /* 1591 * Smartly shows the array type(except dynamic array). 1592 * Normal: 1593 * field:TYPE VAR 1594 * If TYPE := TYPE[LEN], it is shown: 1595 * field:TYPE VAR[LEN] 1596 */ 1597 array_descriptor = strchr(field->type, '['); 1598 1599 if (str_has_prefix(field->type, "__data_loc")) 1600 array_descriptor = NULL; 1601 1602 if (!array_descriptor) 1603 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1604 field->type, field->name, field->offset, 1605 field->size, !!field->is_signed); 1606 else if (field->len) 1607 seq_printf(m, "\tfield:%.*s %s[%d];\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1608 (int)(array_descriptor - field->type), 1609 field->type, field->name, 1610 field->len, field->offset, 1611 field->size, !!field->is_signed); 1612 else 1613 seq_printf(m, "\tfield:%.*s %s[];\toffset:%u;\tsize:%u;\tsigned:%d;\n", 1614 (int)(array_descriptor - field->type), 1615 field->type, field->name, 1616 field->offset, field->size, !!field->is_signed); 1617 1618 return 0; 1619 } 1620 1621 static void *f_start(struct seq_file *m, loff_t *pos) 1622 { 1623 void *p = (void *)FORMAT_HEADER; 1624 loff_t l = 0; 1625 1626 /* ->stop() is called even if ->start() fails */ 1627 mutex_lock(&event_mutex); 1628 if (!event_file_data(m->private)) 1629 return ERR_PTR(-ENODEV); 1630 1631 while (l < *pos && p) 1632 p = f_next(m, p, &l); 1633 1634 return p; 1635 } 1636 1637 static void f_stop(struct seq_file *m, void *p) 1638 { 1639 mutex_unlock(&event_mutex); 1640 } 1641 1642 static const struct seq_operations trace_format_seq_ops = { 1643 .start = f_start, 1644 .next = f_next, 1645 .stop = f_stop, 1646 .show = f_show, 1647 }; 1648 1649 static int trace_format_open(struct inode *inode, struct file *file) 1650 { 1651 struct seq_file *m; 1652 int ret; 1653 1654 /* Do we want to hide event format files on tracefs lockdown? */ 1655 1656 ret = seq_open(file, &trace_format_seq_ops); 1657 if (ret < 0) 1658 return ret; 1659 1660 m = file->private_data; 1661 m->private = file; 1662 1663 return 0; 1664 } 1665 1666 static ssize_t 1667 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1668 { 1669 int id = (long)event_file_data(filp); 1670 char buf[32]; 1671 int len; 1672 1673 if (unlikely(!id)) 1674 return -ENODEV; 1675 1676 len = sprintf(buf, "%d\n", id); 1677 1678 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 1679 } 1680 1681 static ssize_t 1682 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1683 loff_t *ppos) 1684 { 1685 struct trace_event_file *file; 1686 struct trace_seq *s; 1687 int r = -ENODEV; 1688 1689 if (*ppos) 1690 return 0; 1691 1692 s = kmalloc(sizeof(*s), GFP_KERNEL); 1693 1694 if (!s) 1695 return -ENOMEM; 1696 1697 trace_seq_init(s); 1698 1699 mutex_lock(&event_mutex); 1700 file = event_file_data(filp); 1701 if (file) 1702 print_event_filter(file, s); 1703 mutex_unlock(&event_mutex); 1704 1705 if (file) 1706 r = simple_read_from_buffer(ubuf, cnt, ppos, 1707 s->buffer, trace_seq_used(s)); 1708 1709 kfree(s); 1710 1711 return r; 1712 } 1713 1714 static ssize_t 1715 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1716 loff_t *ppos) 1717 { 1718 struct trace_event_file *file; 1719 char *buf; 1720 int err = -ENODEV; 1721 1722 if (cnt >= PAGE_SIZE) 1723 return -EINVAL; 1724 1725 buf = memdup_user_nul(ubuf, cnt); 1726 if (IS_ERR(buf)) 1727 return PTR_ERR(buf); 1728 1729 mutex_lock(&event_mutex); 1730 file = event_file_data(filp); 1731 if (file) 1732 err = apply_event_filter(file, buf); 1733 mutex_unlock(&event_mutex); 1734 1735 kfree(buf); 1736 if (err < 0) 1737 return err; 1738 1739 *ppos += cnt; 1740 1741 return cnt; 1742 } 1743 1744 static LIST_HEAD(event_subsystems); 1745 1746 static int subsystem_open(struct inode *inode, struct file *filp) 1747 { 1748 struct trace_subsystem_dir *dir = NULL, *iter_dir; 1749 struct trace_array *tr = NULL, *iter_tr; 1750 struct event_subsystem *system = NULL; 1751 int ret; 1752 1753 if (tracing_is_disabled()) 1754 return -ENODEV; 1755 1756 /* Make sure the system still exists */ 1757 mutex_lock(&event_mutex); 1758 mutex_lock(&trace_types_lock); 1759 list_for_each_entry(iter_tr, &ftrace_trace_arrays, list) { 1760 list_for_each_entry(iter_dir, &iter_tr->systems, list) { 1761 if (iter_dir == inode->i_private) { 1762 /* Don't open systems with no events */ 1763 tr = iter_tr; 1764 dir = iter_dir; 1765 if (dir->nr_events) { 1766 __get_system_dir(dir); 1767 system = dir->subsystem; 1768 } 1769 goto exit_loop; 1770 } 1771 } 1772 } 1773 exit_loop: 1774 mutex_unlock(&trace_types_lock); 1775 mutex_unlock(&event_mutex); 1776 1777 if (!system) 1778 return -ENODEV; 1779 1780 /* Still need to increment the ref count of the system */ 1781 if (trace_array_get(tr) < 0) { 1782 put_system(dir); 1783 return -ENODEV; 1784 } 1785 1786 ret = tracing_open_generic(inode, filp); 1787 if (ret < 0) { 1788 trace_array_put(tr); 1789 put_system(dir); 1790 } 1791 1792 return ret; 1793 } 1794 1795 static int system_tr_open(struct inode *inode, struct file *filp) 1796 { 1797 struct trace_subsystem_dir *dir; 1798 struct trace_array *tr = inode->i_private; 1799 int ret; 1800 1801 /* Make a temporary dir that has no system but points to tr */ 1802 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1803 if (!dir) 1804 return -ENOMEM; 1805 1806 ret = tracing_open_generic_tr(inode, filp); 1807 if (ret < 0) { 1808 kfree(dir); 1809 return ret; 1810 } 1811 dir->tr = tr; 1812 filp->private_data = dir; 1813 1814 return 0; 1815 } 1816 1817 static int subsystem_release(struct inode *inode, struct file *file) 1818 { 1819 struct trace_subsystem_dir *dir = file->private_data; 1820 1821 trace_array_put(dir->tr); 1822 1823 /* 1824 * If dir->subsystem is NULL, then this is a temporary 1825 * descriptor that was made for a trace_array to enable 1826 * all subsystems. 1827 */ 1828 if (dir->subsystem) 1829 put_system(dir); 1830 else 1831 kfree(dir); 1832 1833 return 0; 1834 } 1835 1836 static ssize_t 1837 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1838 loff_t *ppos) 1839 { 1840 struct trace_subsystem_dir *dir = filp->private_data; 1841 struct event_subsystem *system = dir->subsystem; 1842 struct trace_seq *s; 1843 int r; 1844 1845 if (*ppos) 1846 return 0; 1847 1848 s = kmalloc(sizeof(*s), GFP_KERNEL); 1849 if (!s) 1850 return -ENOMEM; 1851 1852 trace_seq_init(s); 1853 1854 print_subsystem_event_filter(system, s); 1855 r = simple_read_from_buffer(ubuf, cnt, ppos, 1856 s->buffer, trace_seq_used(s)); 1857 1858 kfree(s); 1859 1860 return r; 1861 } 1862 1863 static ssize_t 1864 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1865 loff_t *ppos) 1866 { 1867 struct trace_subsystem_dir *dir = filp->private_data; 1868 char *buf; 1869 int err; 1870 1871 if (cnt >= PAGE_SIZE) 1872 return -EINVAL; 1873 1874 buf = memdup_user_nul(ubuf, cnt); 1875 if (IS_ERR(buf)) 1876 return PTR_ERR(buf); 1877 1878 err = apply_subsystem_event_filter(dir, buf); 1879 kfree(buf); 1880 if (err < 0) 1881 return err; 1882 1883 *ppos += cnt; 1884 1885 return cnt; 1886 } 1887 1888 static ssize_t 1889 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1890 { 1891 int (*func)(struct trace_seq *s) = filp->private_data; 1892 struct trace_seq *s; 1893 int r; 1894 1895 if (*ppos) 1896 return 0; 1897 1898 s = kmalloc(sizeof(*s), GFP_KERNEL); 1899 if (!s) 1900 return -ENOMEM; 1901 1902 trace_seq_init(s); 1903 1904 func(s); 1905 r = simple_read_from_buffer(ubuf, cnt, ppos, 1906 s->buffer, trace_seq_used(s)); 1907 1908 kfree(s); 1909 1910 return r; 1911 } 1912 1913 static void ignore_task_cpu(void *data) 1914 { 1915 struct trace_array *tr = data; 1916 struct trace_pid_list *pid_list; 1917 struct trace_pid_list *no_pid_list; 1918 1919 /* 1920 * This function is called by on_each_cpu() while the 1921 * event_mutex is held. 1922 */ 1923 pid_list = rcu_dereference_protected(tr->filtered_pids, 1924 mutex_is_locked(&event_mutex)); 1925 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 1926 mutex_is_locked(&event_mutex)); 1927 1928 this_cpu_write(tr->array_buffer.data->ignore_pid, 1929 trace_ignore_this_task(pid_list, no_pid_list, current)); 1930 } 1931 1932 static void register_pid_events(struct trace_array *tr) 1933 { 1934 /* 1935 * Register a probe that is called before all other probes 1936 * to set ignore_pid if next or prev do not match. 1937 * Register a probe this is called after all other probes 1938 * to only keep ignore_pid set if next pid matches. 1939 */ 1940 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre, 1941 tr, INT_MAX); 1942 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post, 1943 tr, 0); 1944 1945 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, 1946 tr, INT_MAX); 1947 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1948 tr, 0); 1949 1950 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, 1951 tr, INT_MAX); 1952 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, 1953 tr, 0); 1954 1955 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre, 1956 tr, INT_MAX); 1957 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post, 1958 tr, 0); 1959 } 1960 1961 static ssize_t 1962 event_pid_write(struct file *filp, const char __user *ubuf, 1963 size_t cnt, loff_t *ppos, int type) 1964 { 1965 struct seq_file *m = filp->private_data; 1966 struct trace_array *tr = m->private; 1967 struct trace_pid_list *filtered_pids = NULL; 1968 struct trace_pid_list *other_pids = NULL; 1969 struct trace_pid_list *pid_list; 1970 struct trace_event_file *file; 1971 ssize_t ret; 1972 1973 if (!cnt) 1974 return 0; 1975 1976 ret = tracing_update_buffers(); 1977 if (ret < 0) 1978 return ret; 1979 1980 mutex_lock(&event_mutex); 1981 1982 if (type == TRACE_PIDS) { 1983 filtered_pids = rcu_dereference_protected(tr->filtered_pids, 1984 lockdep_is_held(&event_mutex)); 1985 other_pids = rcu_dereference_protected(tr->filtered_no_pids, 1986 lockdep_is_held(&event_mutex)); 1987 } else { 1988 filtered_pids = rcu_dereference_protected(tr->filtered_no_pids, 1989 lockdep_is_held(&event_mutex)); 1990 other_pids = rcu_dereference_protected(tr->filtered_pids, 1991 lockdep_is_held(&event_mutex)); 1992 } 1993 1994 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 1995 if (ret < 0) 1996 goto out; 1997 1998 if (type == TRACE_PIDS) 1999 rcu_assign_pointer(tr->filtered_pids, pid_list); 2000 else 2001 rcu_assign_pointer(tr->filtered_no_pids, pid_list); 2002 2003 list_for_each_entry(file, &tr->events, list) { 2004 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 2005 } 2006 2007 if (filtered_pids) { 2008 tracepoint_synchronize_unregister(); 2009 trace_pid_list_free(filtered_pids); 2010 } else if (pid_list && !other_pids) { 2011 register_pid_events(tr); 2012 } 2013 2014 /* 2015 * Ignoring of pids is done at task switch. But we have to 2016 * check for those tasks that are currently running. 2017 * Always do this in case a pid was appended or removed. 2018 */ 2019 on_each_cpu(ignore_task_cpu, tr, 1); 2020 2021 out: 2022 mutex_unlock(&event_mutex); 2023 2024 if (ret > 0) 2025 *ppos += ret; 2026 2027 return ret; 2028 } 2029 2030 static ssize_t 2031 ftrace_event_pid_write(struct file *filp, const char __user *ubuf, 2032 size_t cnt, loff_t *ppos) 2033 { 2034 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS); 2035 } 2036 2037 static ssize_t 2038 ftrace_event_npid_write(struct file *filp, const char __user *ubuf, 2039 size_t cnt, loff_t *ppos) 2040 { 2041 return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS); 2042 } 2043 2044 static int ftrace_event_avail_open(struct inode *inode, struct file *file); 2045 static int ftrace_event_set_open(struct inode *inode, struct file *file); 2046 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); 2047 static int ftrace_event_set_npid_open(struct inode *inode, struct file *file); 2048 static int ftrace_event_release(struct inode *inode, struct file *file); 2049 2050 static const struct seq_operations show_event_seq_ops = { 2051 .start = t_start, 2052 .next = t_next, 2053 .show = t_show, 2054 .stop = t_stop, 2055 }; 2056 2057 static const struct seq_operations show_set_event_seq_ops = { 2058 .start = s_start, 2059 .next = s_next, 2060 .show = t_show, 2061 .stop = t_stop, 2062 }; 2063 2064 static const struct seq_operations show_set_pid_seq_ops = { 2065 .start = p_start, 2066 .next = p_next, 2067 .show = trace_pid_show, 2068 .stop = p_stop, 2069 }; 2070 2071 static const struct seq_operations show_set_no_pid_seq_ops = { 2072 .start = np_start, 2073 .next = np_next, 2074 .show = trace_pid_show, 2075 .stop = p_stop, 2076 }; 2077 2078 static const struct file_operations ftrace_avail_fops = { 2079 .open = ftrace_event_avail_open, 2080 .read = seq_read, 2081 .llseek = seq_lseek, 2082 .release = seq_release, 2083 }; 2084 2085 static const struct file_operations ftrace_set_event_fops = { 2086 .open = ftrace_event_set_open, 2087 .read = seq_read, 2088 .write = ftrace_event_write, 2089 .llseek = seq_lseek, 2090 .release = ftrace_event_release, 2091 }; 2092 2093 static const struct file_operations ftrace_set_event_pid_fops = { 2094 .open = ftrace_event_set_pid_open, 2095 .read = seq_read, 2096 .write = ftrace_event_pid_write, 2097 .llseek = seq_lseek, 2098 .release = ftrace_event_release, 2099 }; 2100 2101 static const struct file_operations ftrace_set_event_notrace_pid_fops = { 2102 .open = ftrace_event_set_npid_open, 2103 .read = seq_read, 2104 .write = ftrace_event_npid_write, 2105 .llseek = seq_lseek, 2106 .release = ftrace_event_release, 2107 }; 2108 2109 static const struct file_operations ftrace_enable_fops = { 2110 .open = tracing_open_generic, 2111 .read = event_enable_read, 2112 .write = event_enable_write, 2113 .llseek = default_llseek, 2114 }; 2115 2116 static const struct file_operations ftrace_event_format_fops = { 2117 .open = trace_format_open, 2118 .read = seq_read, 2119 .llseek = seq_lseek, 2120 .release = seq_release, 2121 }; 2122 2123 static const struct file_operations ftrace_event_id_fops = { 2124 .read = event_id_read, 2125 .llseek = default_llseek, 2126 }; 2127 2128 static const struct file_operations ftrace_event_filter_fops = { 2129 .open = tracing_open_generic, 2130 .read = event_filter_read, 2131 .write = event_filter_write, 2132 .llseek = default_llseek, 2133 }; 2134 2135 static const struct file_operations ftrace_subsystem_filter_fops = { 2136 .open = subsystem_open, 2137 .read = subsystem_filter_read, 2138 .write = subsystem_filter_write, 2139 .llseek = default_llseek, 2140 .release = subsystem_release, 2141 }; 2142 2143 static const struct file_operations ftrace_system_enable_fops = { 2144 .open = subsystem_open, 2145 .read = system_enable_read, 2146 .write = system_enable_write, 2147 .llseek = default_llseek, 2148 .release = subsystem_release, 2149 }; 2150 2151 static const struct file_operations ftrace_tr_enable_fops = { 2152 .open = system_tr_open, 2153 .read = system_enable_read, 2154 .write = system_enable_write, 2155 .llseek = default_llseek, 2156 .release = subsystem_release, 2157 }; 2158 2159 static const struct file_operations ftrace_show_header_fops = { 2160 .open = tracing_open_generic, 2161 .read = show_header, 2162 .llseek = default_llseek, 2163 }; 2164 2165 static int 2166 ftrace_event_open(struct inode *inode, struct file *file, 2167 const struct seq_operations *seq_ops) 2168 { 2169 struct seq_file *m; 2170 int ret; 2171 2172 ret = security_locked_down(LOCKDOWN_TRACEFS); 2173 if (ret) 2174 return ret; 2175 2176 ret = seq_open(file, seq_ops); 2177 if (ret < 0) 2178 return ret; 2179 m = file->private_data; 2180 /* copy tr over to seq ops */ 2181 m->private = inode->i_private; 2182 2183 return ret; 2184 } 2185 2186 static int ftrace_event_release(struct inode *inode, struct file *file) 2187 { 2188 struct trace_array *tr = inode->i_private; 2189 2190 trace_array_put(tr); 2191 2192 return seq_release(inode, file); 2193 } 2194 2195 static int 2196 ftrace_event_avail_open(struct inode *inode, struct file *file) 2197 { 2198 const struct seq_operations *seq_ops = &show_event_seq_ops; 2199 2200 /* Checks for tracefs lockdown */ 2201 return ftrace_event_open(inode, file, seq_ops); 2202 } 2203 2204 static int 2205 ftrace_event_set_open(struct inode *inode, struct file *file) 2206 { 2207 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 2208 struct trace_array *tr = inode->i_private; 2209 int ret; 2210 2211 ret = tracing_check_open_get_tr(tr); 2212 if (ret) 2213 return ret; 2214 2215 if ((file->f_mode & FMODE_WRITE) && 2216 (file->f_flags & O_TRUNC)) 2217 ftrace_clear_events(tr); 2218 2219 ret = ftrace_event_open(inode, file, seq_ops); 2220 if (ret < 0) 2221 trace_array_put(tr); 2222 return ret; 2223 } 2224 2225 static int 2226 ftrace_event_set_pid_open(struct inode *inode, struct file *file) 2227 { 2228 const struct seq_operations *seq_ops = &show_set_pid_seq_ops; 2229 struct trace_array *tr = inode->i_private; 2230 int ret; 2231 2232 ret = tracing_check_open_get_tr(tr); 2233 if (ret) 2234 return ret; 2235 2236 if ((file->f_mode & FMODE_WRITE) && 2237 (file->f_flags & O_TRUNC)) 2238 ftrace_clear_event_pids(tr, TRACE_PIDS); 2239 2240 ret = ftrace_event_open(inode, file, seq_ops); 2241 if (ret < 0) 2242 trace_array_put(tr); 2243 return ret; 2244 } 2245 2246 static int 2247 ftrace_event_set_npid_open(struct inode *inode, struct file *file) 2248 { 2249 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops; 2250 struct trace_array *tr = inode->i_private; 2251 int ret; 2252 2253 ret = tracing_check_open_get_tr(tr); 2254 if (ret) 2255 return ret; 2256 2257 if ((file->f_mode & FMODE_WRITE) && 2258 (file->f_flags & O_TRUNC)) 2259 ftrace_clear_event_pids(tr, TRACE_NO_PIDS); 2260 2261 ret = ftrace_event_open(inode, file, seq_ops); 2262 if (ret < 0) 2263 trace_array_put(tr); 2264 return ret; 2265 } 2266 2267 static struct event_subsystem * 2268 create_new_subsystem(const char *name) 2269 { 2270 struct event_subsystem *system; 2271 2272 /* need to create new entry */ 2273 system = kmalloc(sizeof(*system), GFP_KERNEL); 2274 if (!system) 2275 return NULL; 2276 2277 system->ref_count = 1; 2278 2279 /* Only allocate if dynamic (kprobes and modules) */ 2280 system->name = kstrdup_const(name, GFP_KERNEL); 2281 if (!system->name) 2282 goto out_free; 2283 2284 system->filter = NULL; 2285 2286 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); 2287 if (!system->filter) 2288 goto out_free; 2289 2290 list_add(&system->list, &event_subsystems); 2291 2292 return system; 2293 2294 out_free: 2295 kfree_const(system->name); 2296 kfree(system); 2297 return NULL; 2298 } 2299 2300 static struct dentry * 2301 event_subsystem_dir(struct trace_array *tr, const char *name, 2302 struct trace_event_file *file, struct dentry *parent) 2303 { 2304 struct event_subsystem *system, *iter; 2305 struct trace_subsystem_dir *dir; 2306 struct dentry *entry; 2307 2308 /* First see if we did not already create this dir */ 2309 list_for_each_entry(dir, &tr->systems, list) { 2310 system = dir->subsystem; 2311 if (strcmp(system->name, name) == 0) { 2312 dir->nr_events++; 2313 file->system = dir; 2314 return dir->entry; 2315 } 2316 } 2317 2318 /* Now see if the system itself exists. */ 2319 system = NULL; 2320 list_for_each_entry(iter, &event_subsystems, list) { 2321 if (strcmp(iter->name, name) == 0) { 2322 system = iter; 2323 break; 2324 } 2325 } 2326 2327 dir = kmalloc(sizeof(*dir), GFP_KERNEL); 2328 if (!dir) 2329 goto out_fail; 2330 2331 if (!system) { 2332 system = create_new_subsystem(name); 2333 if (!system) 2334 goto out_free; 2335 } else 2336 __get_system(system); 2337 2338 dir->entry = tracefs_create_dir(name, parent); 2339 if (!dir->entry) { 2340 pr_warn("Failed to create system directory %s\n", name); 2341 __put_system(system); 2342 goto out_free; 2343 } 2344 2345 dir->tr = tr; 2346 dir->ref_count = 1; 2347 dir->nr_events = 1; 2348 dir->subsystem = system; 2349 file->system = dir; 2350 2351 /* the ftrace system is special, do not create enable or filter files */ 2352 if (strcmp(name, "ftrace") != 0) { 2353 2354 entry = tracefs_create_file("filter", TRACE_MODE_WRITE, 2355 dir->entry, dir, 2356 &ftrace_subsystem_filter_fops); 2357 if (!entry) { 2358 kfree(system->filter); 2359 system->filter = NULL; 2360 pr_warn("Could not create tracefs '%s/filter' entry\n", name); 2361 } 2362 2363 trace_create_file("enable", TRACE_MODE_WRITE, dir->entry, dir, 2364 &ftrace_system_enable_fops); 2365 } 2366 2367 list_add(&dir->list, &tr->systems); 2368 2369 return dir->entry; 2370 2371 out_free: 2372 kfree(dir); 2373 out_fail: 2374 /* Only print this message if failed on memory allocation */ 2375 if (!dir || !system) 2376 pr_warn("No memory to create event subsystem %s\n", name); 2377 return NULL; 2378 } 2379 2380 static int 2381 event_define_fields(struct trace_event_call *call) 2382 { 2383 struct list_head *head; 2384 int ret = 0; 2385 2386 /* 2387 * Other events may have the same class. Only update 2388 * the fields if they are not already defined. 2389 */ 2390 head = trace_get_fields(call); 2391 if (list_empty(head)) { 2392 struct trace_event_fields *field = call->class->fields_array; 2393 unsigned int offset = sizeof(struct trace_entry); 2394 2395 for (; field->type; field++) { 2396 if (field->type == TRACE_FUNCTION_TYPE) { 2397 field->define_fields(call); 2398 break; 2399 } 2400 2401 offset = ALIGN(offset, field->align); 2402 ret = trace_define_field_ext(call, field->type, field->name, 2403 offset, field->size, 2404 field->is_signed, field->filter_type, 2405 field->len); 2406 if (WARN_ON_ONCE(ret)) { 2407 pr_err("error code is %d\n", ret); 2408 break; 2409 } 2410 2411 offset += field->size; 2412 } 2413 } 2414 2415 return ret; 2416 } 2417 2418 static int 2419 event_create_dir(struct dentry *parent, struct trace_event_file *file) 2420 { 2421 struct trace_event_call *call = file->event_call; 2422 struct trace_array *tr = file->tr; 2423 struct dentry *d_events; 2424 const char *name; 2425 int ret; 2426 2427 /* 2428 * If the trace point header did not define TRACE_SYSTEM 2429 * then the system would be called "TRACE_SYSTEM". 2430 */ 2431 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { 2432 d_events = event_subsystem_dir(tr, call->class->system, file, parent); 2433 if (!d_events) 2434 return -ENOMEM; 2435 } else 2436 d_events = parent; 2437 2438 name = trace_event_name(call); 2439 file->dir = tracefs_create_dir(name, d_events); 2440 if (!file->dir) { 2441 pr_warn("Could not create tracefs '%s' directory\n", name); 2442 return -1; 2443 } 2444 2445 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 2446 trace_create_file("enable", TRACE_MODE_WRITE, file->dir, file, 2447 &ftrace_enable_fops); 2448 2449 #ifdef CONFIG_PERF_EVENTS 2450 if (call->event.type && call->class->reg) 2451 trace_create_file("id", TRACE_MODE_READ, file->dir, 2452 (void *)(long)call->event.type, 2453 &ftrace_event_id_fops); 2454 #endif 2455 2456 ret = event_define_fields(call); 2457 if (ret < 0) { 2458 pr_warn("Could not initialize trace point events/%s\n", name); 2459 return ret; 2460 } 2461 2462 /* 2463 * Only event directories that can be enabled should have 2464 * triggers or filters. 2465 */ 2466 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) { 2467 trace_create_file("filter", TRACE_MODE_WRITE, file->dir, 2468 file, &ftrace_event_filter_fops); 2469 2470 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir, 2471 file, &event_trigger_fops); 2472 } 2473 2474 #ifdef CONFIG_HIST_TRIGGERS 2475 trace_create_file("hist", TRACE_MODE_READ, file->dir, file, 2476 &event_hist_fops); 2477 #endif 2478 #ifdef CONFIG_HIST_TRIGGERS_DEBUG 2479 trace_create_file("hist_debug", TRACE_MODE_READ, file->dir, file, 2480 &event_hist_debug_fops); 2481 #endif 2482 trace_create_file("format", TRACE_MODE_READ, file->dir, call, 2483 &ftrace_event_format_fops); 2484 2485 #ifdef CONFIG_TRACE_EVENT_INJECT 2486 if (call->event.type && call->class->reg) 2487 trace_create_file("inject", 0200, file->dir, file, 2488 &event_inject_fops); 2489 #endif 2490 2491 return 0; 2492 } 2493 2494 static void remove_event_from_tracers(struct trace_event_call *call) 2495 { 2496 struct trace_event_file *file; 2497 struct trace_array *tr; 2498 2499 do_for_each_event_file_safe(tr, file) { 2500 if (file->event_call != call) 2501 continue; 2502 2503 remove_event_file_dir(file); 2504 /* 2505 * The do_for_each_event_file_safe() is 2506 * a double loop. After finding the call for this 2507 * trace_array, we use break to jump to the next 2508 * trace_array. 2509 */ 2510 break; 2511 } while_for_each_event_file(); 2512 } 2513 2514 static void event_remove(struct trace_event_call *call) 2515 { 2516 struct trace_array *tr; 2517 struct trace_event_file *file; 2518 2519 do_for_each_event_file(tr, file) { 2520 if (file->event_call != call) 2521 continue; 2522 2523 if (file->flags & EVENT_FILE_FL_WAS_ENABLED) 2524 tr->clear_trace = true; 2525 2526 ftrace_event_enable_disable(file, 0); 2527 /* 2528 * The do_for_each_event_file() is 2529 * a double loop. After finding the call for this 2530 * trace_array, we use break to jump to the next 2531 * trace_array. 2532 */ 2533 break; 2534 } while_for_each_event_file(); 2535 2536 if (call->event.funcs) 2537 __unregister_trace_event(&call->event); 2538 remove_event_from_tracers(call); 2539 list_del(&call->list); 2540 } 2541 2542 static int event_init(struct trace_event_call *call) 2543 { 2544 int ret = 0; 2545 const char *name; 2546 2547 name = trace_event_name(call); 2548 if (WARN_ON(!name)) 2549 return -EINVAL; 2550 2551 if (call->class->raw_init) { 2552 ret = call->class->raw_init(call); 2553 if (ret < 0 && ret != -ENOSYS) 2554 pr_warn("Could not initialize trace events/%s\n", name); 2555 } 2556 2557 return ret; 2558 } 2559 2560 static int 2561 __register_event(struct trace_event_call *call, struct module *mod) 2562 { 2563 int ret; 2564 2565 ret = event_init(call); 2566 if (ret < 0) 2567 return ret; 2568 2569 list_add(&call->list, &ftrace_events); 2570 if (call->flags & TRACE_EVENT_FL_DYNAMIC) 2571 atomic_set(&call->refcnt, 0); 2572 else 2573 call->module = mod; 2574 2575 return 0; 2576 } 2577 2578 static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) 2579 { 2580 int rlen; 2581 int elen; 2582 2583 /* Find the length of the eval value as a string */ 2584 elen = snprintf(ptr, 0, "%ld", map->eval_value); 2585 /* Make sure there's enough room to replace the string with the value */ 2586 if (len < elen) 2587 return NULL; 2588 2589 snprintf(ptr, elen + 1, "%ld", map->eval_value); 2590 2591 /* Get the rest of the string of ptr */ 2592 rlen = strlen(ptr + len); 2593 memmove(ptr + elen, ptr + len, rlen); 2594 /* Make sure we end the new string */ 2595 ptr[elen + rlen] = 0; 2596 2597 return ptr + elen; 2598 } 2599 2600 static void update_event_printk(struct trace_event_call *call, 2601 struct trace_eval_map *map) 2602 { 2603 char *ptr; 2604 int quote = 0; 2605 int len = strlen(map->eval_string); 2606 2607 for (ptr = call->print_fmt; *ptr; ptr++) { 2608 if (*ptr == '\\') { 2609 ptr++; 2610 /* paranoid */ 2611 if (!*ptr) 2612 break; 2613 continue; 2614 } 2615 if (*ptr == '"') { 2616 quote ^= 1; 2617 continue; 2618 } 2619 if (quote) 2620 continue; 2621 if (isdigit(*ptr)) { 2622 /* skip numbers */ 2623 do { 2624 ptr++; 2625 /* Check for alpha chars like ULL */ 2626 } while (isalnum(*ptr)); 2627 if (!*ptr) 2628 break; 2629 /* 2630 * A number must have some kind of delimiter after 2631 * it, and we can ignore that too. 2632 */ 2633 continue; 2634 } 2635 if (isalpha(*ptr) || *ptr == '_') { 2636 if (strncmp(map->eval_string, ptr, len) == 0 && 2637 !isalnum(ptr[len]) && ptr[len] != '_') { 2638 ptr = eval_replace(ptr, map, len); 2639 /* enum/sizeof string smaller than value */ 2640 if (WARN_ON_ONCE(!ptr)) 2641 return; 2642 /* 2643 * No need to decrement here, as eval_replace() 2644 * returns the pointer to the character passed 2645 * the eval, and two evals can not be placed 2646 * back to back without something in between. 2647 * We can skip that something in between. 2648 */ 2649 continue; 2650 } 2651 skip_more: 2652 do { 2653 ptr++; 2654 } while (isalnum(*ptr) || *ptr == '_'); 2655 if (!*ptr) 2656 break; 2657 /* 2658 * If what comes after this variable is a '.' or 2659 * '->' then we can continue to ignore that string. 2660 */ 2661 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) { 2662 ptr += *ptr == '.' ? 1 : 2; 2663 if (!*ptr) 2664 break; 2665 goto skip_more; 2666 } 2667 /* 2668 * Once again, we can skip the delimiter that came 2669 * after the string. 2670 */ 2671 continue; 2672 } 2673 } 2674 } 2675 2676 static void add_str_to_module(struct module *module, char *str) 2677 { 2678 struct module_string *modstr; 2679 2680 modstr = kmalloc(sizeof(*modstr), GFP_KERNEL); 2681 2682 /* 2683 * If we failed to allocate memory here, then we'll just 2684 * let the str memory leak when the module is removed. 2685 * If this fails to allocate, there's worse problems than 2686 * a leaked string on module removal. 2687 */ 2688 if (WARN_ON_ONCE(!modstr)) 2689 return; 2690 2691 modstr->module = module; 2692 modstr->str = str; 2693 2694 list_add(&modstr->next, &module_strings); 2695 } 2696 2697 static void update_event_fields(struct trace_event_call *call, 2698 struct trace_eval_map *map) 2699 { 2700 struct ftrace_event_field *field; 2701 struct list_head *head; 2702 char *ptr; 2703 char *str; 2704 int len = strlen(map->eval_string); 2705 2706 /* Dynamic events should never have field maps */ 2707 if (WARN_ON_ONCE(call->flags & TRACE_EVENT_FL_DYNAMIC)) 2708 return; 2709 2710 head = trace_get_fields(call); 2711 list_for_each_entry(field, head, link) { 2712 ptr = strchr(field->type, '['); 2713 if (!ptr) 2714 continue; 2715 ptr++; 2716 2717 if (!isalpha(*ptr) && *ptr != '_') 2718 continue; 2719 2720 if (strncmp(map->eval_string, ptr, len) != 0) 2721 continue; 2722 2723 str = kstrdup(field->type, GFP_KERNEL); 2724 if (WARN_ON_ONCE(!str)) 2725 return; 2726 ptr = str + (ptr - field->type); 2727 ptr = eval_replace(ptr, map, len); 2728 /* enum/sizeof string smaller than value */ 2729 if (WARN_ON_ONCE(!ptr)) { 2730 kfree(str); 2731 continue; 2732 } 2733 2734 /* 2735 * If the event is part of a module, then we need to free the string 2736 * when the module is removed. Otherwise, it will stay allocated 2737 * until a reboot. 2738 */ 2739 if (call->module) 2740 add_str_to_module(call->module, str); 2741 2742 field->type = str; 2743 } 2744 } 2745 2746 void trace_event_eval_update(struct trace_eval_map **map, int len) 2747 { 2748 struct trace_event_call *call, *p; 2749 const char *last_system = NULL; 2750 bool first = false; 2751 int last_i; 2752 int i; 2753 2754 down_write(&trace_event_sem); 2755 list_for_each_entry_safe(call, p, &ftrace_events, list) { 2756 /* events are usually grouped together with systems */ 2757 if (!last_system || call->class->system != last_system) { 2758 first = true; 2759 last_i = 0; 2760 last_system = call->class->system; 2761 } 2762 2763 /* 2764 * Since calls are grouped by systems, the likelihood that the 2765 * next call in the iteration belongs to the same system as the 2766 * previous call is high. As an optimization, we skip searching 2767 * for a map[] that matches the call's system if the last call 2768 * was from the same system. That's what last_i is for. If the 2769 * call has the same system as the previous call, then last_i 2770 * will be the index of the first map[] that has a matching 2771 * system. 2772 */ 2773 for (i = last_i; i < len; i++) { 2774 if (call->class->system == map[i]->system) { 2775 /* Save the first system if need be */ 2776 if (first) { 2777 last_i = i; 2778 first = false; 2779 } 2780 update_event_printk(call, map[i]); 2781 update_event_fields(call, map[i]); 2782 } 2783 } 2784 } 2785 up_write(&trace_event_sem); 2786 } 2787 2788 static struct trace_event_file * 2789 trace_create_new_event(struct trace_event_call *call, 2790 struct trace_array *tr) 2791 { 2792 struct trace_pid_list *no_pid_list; 2793 struct trace_pid_list *pid_list; 2794 struct trace_event_file *file; 2795 unsigned int first; 2796 2797 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 2798 if (!file) 2799 return NULL; 2800 2801 pid_list = rcu_dereference_protected(tr->filtered_pids, 2802 lockdep_is_held(&event_mutex)); 2803 no_pid_list = rcu_dereference_protected(tr->filtered_no_pids, 2804 lockdep_is_held(&event_mutex)); 2805 2806 if (!trace_pid_list_first(pid_list, &first) || 2807 !trace_pid_list_first(no_pid_list, &first)) 2808 file->flags |= EVENT_FILE_FL_PID_FILTER; 2809 2810 file->event_call = call; 2811 file->tr = tr; 2812 atomic_set(&file->sm_ref, 0); 2813 atomic_set(&file->tm_ref, 0); 2814 INIT_LIST_HEAD(&file->triggers); 2815 list_add(&file->list, &tr->events); 2816 2817 return file; 2818 } 2819 2820 #define MAX_BOOT_TRIGGERS 32 2821 2822 static struct boot_triggers { 2823 const char *event; 2824 char *trigger; 2825 } bootup_triggers[MAX_BOOT_TRIGGERS]; 2826 2827 static char bootup_trigger_buf[COMMAND_LINE_SIZE]; 2828 static int nr_boot_triggers; 2829 2830 static __init int setup_trace_triggers(char *str) 2831 { 2832 char *trigger; 2833 char *buf; 2834 int i; 2835 2836 strlcpy(bootup_trigger_buf, str, COMMAND_LINE_SIZE); 2837 ring_buffer_expanded = true; 2838 disable_tracing_selftest("running event triggers"); 2839 2840 buf = bootup_trigger_buf; 2841 for (i = 0; i < MAX_BOOT_TRIGGERS; i++) { 2842 trigger = strsep(&buf, ","); 2843 if (!trigger) 2844 break; 2845 bootup_triggers[i].event = strsep(&trigger, "."); 2846 bootup_triggers[i].trigger = strsep(&trigger, "."); 2847 if (!bootup_triggers[i].trigger) 2848 break; 2849 } 2850 2851 nr_boot_triggers = i; 2852 return 1; 2853 } 2854 __setup("trace_trigger=", setup_trace_triggers); 2855 2856 /* Add an event to a trace directory */ 2857 static int 2858 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) 2859 { 2860 struct trace_event_file *file; 2861 2862 file = trace_create_new_event(call, tr); 2863 if (!file) 2864 return -ENOMEM; 2865 2866 if (eventdir_initialized) 2867 return event_create_dir(tr->event_dir, file); 2868 else 2869 return event_define_fields(call); 2870 } 2871 2872 static void trace_early_triggers(struct trace_event_file *file, const char *name) 2873 { 2874 int ret; 2875 int i; 2876 2877 for (i = 0; i < nr_boot_triggers; i++) { 2878 if (strcmp(name, bootup_triggers[i].event)) 2879 continue; 2880 mutex_lock(&event_mutex); 2881 ret = trigger_process_regex(file, bootup_triggers[i].trigger); 2882 mutex_unlock(&event_mutex); 2883 if (ret) 2884 pr_err("Failed to register trigger '%s' on event %s\n", 2885 bootup_triggers[i].trigger, 2886 bootup_triggers[i].event); 2887 } 2888 } 2889 2890 /* 2891 * Just create a descriptor for early init. A descriptor is required 2892 * for enabling events at boot. We want to enable events before 2893 * the filesystem is initialized. 2894 */ 2895 static int 2896 __trace_early_add_new_event(struct trace_event_call *call, 2897 struct trace_array *tr) 2898 { 2899 struct trace_event_file *file; 2900 int ret; 2901 2902 file = trace_create_new_event(call, tr); 2903 if (!file) 2904 return -ENOMEM; 2905 2906 ret = event_define_fields(call); 2907 if (ret) 2908 return ret; 2909 2910 trace_early_triggers(file, trace_event_name(call)); 2911 2912 return 0; 2913 } 2914 2915 struct ftrace_module_file_ops; 2916 static void __add_event_to_tracers(struct trace_event_call *call); 2917 2918 /* Add an additional event_call dynamically */ 2919 int trace_add_event_call(struct trace_event_call *call) 2920 { 2921 int ret; 2922 lockdep_assert_held(&event_mutex); 2923 2924 mutex_lock(&trace_types_lock); 2925 2926 ret = __register_event(call, NULL); 2927 if (ret >= 0) 2928 __add_event_to_tracers(call); 2929 2930 mutex_unlock(&trace_types_lock); 2931 return ret; 2932 } 2933 EXPORT_SYMBOL_GPL(trace_add_event_call); 2934 2935 /* 2936 * Must be called under locking of trace_types_lock, event_mutex and 2937 * trace_event_sem. 2938 */ 2939 static void __trace_remove_event_call(struct trace_event_call *call) 2940 { 2941 event_remove(call); 2942 trace_destroy_fields(call); 2943 free_event_filter(call->filter); 2944 call->filter = NULL; 2945 } 2946 2947 static int probe_remove_event_call(struct trace_event_call *call) 2948 { 2949 struct trace_array *tr; 2950 struct trace_event_file *file; 2951 2952 #ifdef CONFIG_PERF_EVENTS 2953 if (call->perf_refcount) 2954 return -EBUSY; 2955 #endif 2956 do_for_each_event_file(tr, file) { 2957 if (file->event_call != call) 2958 continue; 2959 /* 2960 * We can't rely on ftrace_event_enable_disable(enable => 0) 2961 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress 2962 * TRACE_REG_UNREGISTER. 2963 */ 2964 if (file->flags & EVENT_FILE_FL_ENABLED) 2965 goto busy; 2966 2967 if (file->flags & EVENT_FILE_FL_WAS_ENABLED) 2968 tr->clear_trace = true; 2969 /* 2970 * The do_for_each_event_file_safe() is 2971 * a double loop. After finding the call for this 2972 * trace_array, we use break to jump to the next 2973 * trace_array. 2974 */ 2975 break; 2976 } while_for_each_event_file(); 2977 2978 __trace_remove_event_call(call); 2979 2980 return 0; 2981 busy: 2982 /* No need to clear the trace now */ 2983 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 2984 tr->clear_trace = false; 2985 } 2986 return -EBUSY; 2987 } 2988 2989 /* Remove an event_call */ 2990 int trace_remove_event_call(struct trace_event_call *call) 2991 { 2992 int ret; 2993 2994 lockdep_assert_held(&event_mutex); 2995 2996 mutex_lock(&trace_types_lock); 2997 down_write(&trace_event_sem); 2998 ret = probe_remove_event_call(call); 2999 up_write(&trace_event_sem); 3000 mutex_unlock(&trace_types_lock); 3001 3002 return ret; 3003 } 3004 EXPORT_SYMBOL_GPL(trace_remove_event_call); 3005 3006 #define for_each_event(event, start, end) \ 3007 for (event = start; \ 3008 (unsigned long)event < (unsigned long)end; \ 3009 event++) 3010 3011 #ifdef CONFIG_MODULES 3012 3013 static void trace_module_add_events(struct module *mod) 3014 { 3015 struct trace_event_call **call, **start, **end; 3016 3017 if (!mod->num_trace_events) 3018 return; 3019 3020 /* Don't add infrastructure for mods without tracepoints */ 3021 if (trace_module_has_bad_taint(mod)) { 3022 pr_err("%s: module has bad taint, not creating trace events\n", 3023 mod->name); 3024 return; 3025 } 3026 3027 start = mod->trace_events; 3028 end = mod->trace_events + mod->num_trace_events; 3029 3030 for_each_event(call, start, end) { 3031 __register_event(*call, mod); 3032 __add_event_to_tracers(*call); 3033 } 3034 } 3035 3036 static void trace_module_remove_events(struct module *mod) 3037 { 3038 struct trace_event_call *call, *p; 3039 struct module_string *modstr, *m; 3040 3041 down_write(&trace_event_sem); 3042 list_for_each_entry_safe(call, p, &ftrace_events, list) { 3043 if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module) 3044 continue; 3045 if (call->module == mod) 3046 __trace_remove_event_call(call); 3047 } 3048 /* Check for any strings allocade for this module */ 3049 list_for_each_entry_safe(modstr, m, &module_strings, next) { 3050 if (modstr->module != mod) 3051 continue; 3052 list_del(&modstr->next); 3053 kfree(modstr->str); 3054 kfree(modstr); 3055 } 3056 up_write(&trace_event_sem); 3057 3058 /* 3059 * It is safest to reset the ring buffer if the module being unloaded 3060 * registered any events that were used. The only worry is if 3061 * a new module gets loaded, and takes on the same id as the events 3062 * of this module. When printing out the buffer, traced events left 3063 * over from this module may be passed to the new module events and 3064 * unexpected results may occur. 3065 */ 3066 tracing_reset_all_online_cpus_unlocked(); 3067 } 3068 3069 static int trace_module_notify(struct notifier_block *self, 3070 unsigned long val, void *data) 3071 { 3072 struct module *mod = data; 3073 3074 mutex_lock(&event_mutex); 3075 mutex_lock(&trace_types_lock); 3076 switch (val) { 3077 case MODULE_STATE_COMING: 3078 trace_module_add_events(mod); 3079 break; 3080 case MODULE_STATE_GOING: 3081 trace_module_remove_events(mod); 3082 break; 3083 } 3084 mutex_unlock(&trace_types_lock); 3085 mutex_unlock(&event_mutex); 3086 3087 return NOTIFY_OK; 3088 } 3089 3090 static struct notifier_block trace_module_nb = { 3091 .notifier_call = trace_module_notify, 3092 .priority = 1, /* higher than trace.c module notify */ 3093 }; 3094 #endif /* CONFIG_MODULES */ 3095 3096 /* Create a new event directory structure for a trace directory. */ 3097 static void 3098 __trace_add_event_dirs(struct trace_array *tr) 3099 { 3100 struct trace_event_call *call; 3101 int ret; 3102 3103 list_for_each_entry(call, &ftrace_events, list) { 3104 ret = __trace_add_new_event(call, tr); 3105 if (ret < 0) 3106 pr_warn("Could not create directory for event %s\n", 3107 trace_event_name(call)); 3108 } 3109 } 3110 3111 /* Returns any file that matches the system and event */ 3112 struct trace_event_file * 3113 __find_event_file(struct trace_array *tr, const char *system, const char *event) 3114 { 3115 struct trace_event_file *file; 3116 struct trace_event_call *call; 3117 const char *name; 3118 3119 list_for_each_entry(file, &tr->events, list) { 3120 3121 call = file->event_call; 3122 name = trace_event_name(call); 3123 3124 if (!name || !call->class) 3125 continue; 3126 3127 if (strcmp(event, name) == 0 && 3128 strcmp(system, call->class->system) == 0) 3129 return file; 3130 } 3131 return NULL; 3132 } 3133 3134 /* Returns valid trace event files that match system and event */ 3135 struct trace_event_file * 3136 find_event_file(struct trace_array *tr, const char *system, const char *event) 3137 { 3138 struct trace_event_file *file; 3139 3140 file = __find_event_file(tr, system, event); 3141 if (!file || !file->event_call->class->reg || 3142 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 3143 return NULL; 3144 3145 return file; 3146 } 3147 3148 /** 3149 * trace_get_event_file - Find and return a trace event file 3150 * @instance: The name of the trace instance containing the event 3151 * @system: The name of the system containing the event 3152 * @event: The name of the event 3153 * 3154 * Return a trace event file given the trace instance name, trace 3155 * system, and trace event name. If the instance name is NULL, it 3156 * refers to the top-level trace array. 3157 * 3158 * This function will look it up and return it if found, after calling 3159 * trace_array_get() to prevent the instance from going away, and 3160 * increment the event's module refcount to prevent it from being 3161 * removed. 3162 * 3163 * To release the file, call trace_put_event_file(), which will call 3164 * trace_array_put() and decrement the event's module refcount. 3165 * 3166 * Return: The trace event on success, ERR_PTR otherwise. 3167 */ 3168 struct trace_event_file *trace_get_event_file(const char *instance, 3169 const char *system, 3170 const char *event) 3171 { 3172 struct trace_array *tr = top_trace_array(); 3173 struct trace_event_file *file = NULL; 3174 int ret = -EINVAL; 3175 3176 if (instance) { 3177 tr = trace_array_find_get(instance); 3178 if (!tr) 3179 return ERR_PTR(-ENOENT); 3180 } else { 3181 ret = trace_array_get(tr); 3182 if (ret) 3183 return ERR_PTR(ret); 3184 } 3185 3186 mutex_lock(&event_mutex); 3187 3188 file = find_event_file(tr, system, event); 3189 if (!file) { 3190 trace_array_put(tr); 3191 ret = -EINVAL; 3192 goto out; 3193 } 3194 3195 /* Don't let event modules unload while in use */ 3196 ret = trace_event_try_get_ref(file->event_call); 3197 if (!ret) { 3198 trace_array_put(tr); 3199 ret = -EBUSY; 3200 goto out; 3201 } 3202 3203 ret = 0; 3204 out: 3205 mutex_unlock(&event_mutex); 3206 3207 if (ret) 3208 file = ERR_PTR(ret); 3209 3210 return file; 3211 } 3212 EXPORT_SYMBOL_GPL(trace_get_event_file); 3213 3214 /** 3215 * trace_put_event_file - Release a file from trace_get_event_file() 3216 * @file: The trace event file 3217 * 3218 * If a file was retrieved using trace_get_event_file(), this should 3219 * be called when it's no longer needed. It will cancel the previous 3220 * trace_array_get() called by that function, and decrement the 3221 * event's module refcount. 3222 */ 3223 void trace_put_event_file(struct trace_event_file *file) 3224 { 3225 mutex_lock(&event_mutex); 3226 trace_event_put_ref(file->event_call); 3227 mutex_unlock(&event_mutex); 3228 3229 trace_array_put(file->tr); 3230 } 3231 EXPORT_SYMBOL_GPL(trace_put_event_file); 3232 3233 #ifdef CONFIG_DYNAMIC_FTRACE 3234 3235 /* Avoid typos */ 3236 #define ENABLE_EVENT_STR "enable_event" 3237 #define DISABLE_EVENT_STR "disable_event" 3238 3239 struct event_probe_data { 3240 struct trace_event_file *file; 3241 unsigned long count; 3242 int ref; 3243 bool enable; 3244 }; 3245 3246 static void update_event_probe(struct event_probe_data *data) 3247 { 3248 if (data->enable) 3249 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3250 else 3251 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 3252 } 3253 3254 static void 3255 event_enable_probe(unsigned long ip, unsigned long parent_ip, 3256 struct trace_array *tr, struct ftrace_probe_ops *ops, 3257 void *data) 3258 { 3259 struct ftrace_func_mapper *mapper = data; 3260 struct event_probe_data *edata; 3261 void **pdata; 3262 3263 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3264 if (!pdata || !*pdata) 3265 return; 3266 3267 edata = *pdata; 3268 update_event_probe(edata); 3269 } 3270 3271 static void 3272 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, 3273 struct trace_array *tr, struct ftrace_probe_ops *ops, 3274 void *data) 3275 { 3276 struct ftrace_func_mapper *mapper = data; 3277 struct event_probe_data *edata; 3278 void **pdata; 3279 3280 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3281 if (!pdata || !*pdata) 3282 return; 3283 3284 edata = *pdata; 3285 3286 if (!edata->count) 3287 return; 3288 3289 /* Skip if the event is in a state we want to switch to */ 3290 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 3291 return; 3292 3293 if (edata->count != -1) 3294 (edata->count)--; 3295 3296 update_event_probe(edata); 3297 } 3298 3299 static int 3300 event_enable_print(struct seq_file *m, unsigned long ip, 3301 struct ftrace_probe_ops *ops, void *data) 3302 { 3303 struct ftrace_func_mapper *mapper = data; 3304 struct event_probe_data *edata; 3305 void **pdata; 3306 3307 pdata = ftrace_func_mapper_find_ip(mapper, ip); 3308 3309 if (WARN_ON_ONCE(!pdata || !*pdata)) 3310 return 0; 3311 3312 edata = *pdata; 3313 3314 seq_printf(m, "%ps:", (void *)ip); 3315 3316 seq_printf(m, "%s:%s:%s", 3317 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, 3318 edata->file->event_call->class->system, 3319 trace_event_name(edata->file->event_call)); 3320 3321 if (edata->count == -1) 3322 seq_puts(m, ":unlimited\n"); 3323 else 3324 seq_printf(m, ":count=%ld\n", edata->count); 3325 3326 return 0; 3327 } 3328 3329 static int 3330 event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 3331 unsigned long ip, void *init_data, void **data) 3332 { 3333 struct ftrace_func_mapper *mapper = *data; 3334 struct event_probe_data *edata = init_data; 3335 int ret; 3336 3337 if (!mapper) { 3338 mapper = allocate_ftrace_func_mapper(); 3339 if (!mapper) 3340 return -ENODEV; 3341 *data = mapper; 3342 } 3343 3344 ret = ftrace_func_mapper_add_ip(mapper, ip, edata); 3345 if (ret < 0) 3346 return ret; 3347 3348 edata->ref++; 3349 3350 return 0; 3351 } 3352 3353 static int free_probe_data(void *data) 3354 { 3355 struct event_probe_data *edata = data; 3356 3357 edata->ref--; 3358 if (!edata->ref) { 3359 /* Remove the SOFT_MODE flag */ 3360 __ftrace_event_enable_disable(edata->file, 0, 1); 3361 trace_event_put_ref(edata->file->event_call); 3362 kfree(edata); 3363 } 3364 return 0; 3365 } 3366 3367 static void 3368 event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr, 3369 unsigned long ip, void *data) 3370 { 3371 struct ftrace_func_mapper *mapper = data; 3372 struct event_probe_data *edata; 3373 3374 if (!ip) { 3375 if (!mapper) 3376 return; 3377 free_ftrace_func_mapper(mapper, free_probe_data); 3378 return; 3379 } 3380 3381 edata = ftrace_func_mapper_remove_ip(mapper, ip); 3382 3383 if (WARN_ON_ONCE(!edata)) 3384 return; 3385 3386 if (WARN_ON_ONCE(edata->ref <= 0)) 3387 return; 3388 3389 free_probe_data(edata); 3390 } 3391 3392 static struct ftrace_probe_ops event_enable_probe_ops = { 3393 .func = event_enable_probe, 3394 .print = event_enable_print, 3395 .init = event_enable_init, 3396 .free = event_enable_free, 3397 }; 3398 3399 static struct ftrace_probe_ops event_enable_count_probe_ops = { 3400 .func = event_enable_count_probe, 3401 .print = event_enable_print, 3402 .init = event_enable_init, 3403 .free = event_enable_free, 3404 }; 3405 3406 static struct ftrace_probe_ops event_disable_probe_ops = { 3407 .func = event_enable_probe, 3408 .print = event_enable_print, 3409 .init = event_enable_init, 3410 .free = event_enable_free, 3411 }; 3412 3413 static struct ftrace_probe_ops event_disable_count_probe_ops = { 3414 .func = event_enable_count_probe, 3415 .print = event_enable_print, 3416 .init = event_enable_init, 3417 .free = event_enable_free, 3418 }; 3419 3420 static int 3421 event_enable_func(struct trace_array *tr, struct ftrace_hash *hash, 3422 char *glob, char *cmd, char *param, int enabled) 3423 { 3424 struct trace_event_file *file; 3425 struct ftrace_probe_ops *ops; 3426 struct event_probe_data *data; 3427 const char *system; 3428 const char *event; 3429 char *number; 3430 bool enable; 3431 int ret; 3432 3433 if (!tr) 3434 return -ENODEV; 3435 3436 /* hash funcs only work with set_ftrace_filter */ 3437 if (!enabled || !param) 3438 return -EINVAL; 3439 3440 system = strsep(¶m, ":"); 3441 if (!param) 3442 return -EINVAL; 3443 3444 event = strsep(¶m, ":"); 3445 3446 mutex_lock(&event_mutex); 3447 3448 ret = -EINVAL; 3449 file = find_event_file(tr, system, event); 3450 if (!file) 3451 goto out; 3452 3453 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; 3454 3455 if (enable) 3456 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; 3457 else 3458 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; 3459 3460 if (glob[0] == '!') { 3461 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); 3462 goto out; 3463 } 3464 3465 ret = -ENOMEM; 3466 3467 data = kzalloc(sizeof(*data), GFP_KERNEL); 3468 if (!data) 3469 goto out; 3470 3471 data->enable = enable; 3472 data->count = -1; 3473 data->file = file; 3474 3475 if (!param) 3476 goto out_reg; 3477 3478 number = strsep(¶m, ":"); 3479 3480 ret = -EINVAL; 3481 if (!strlen(number)) 3482 goto out_free; 3483 3484 /* 3485 * We use the callback data field (which is a pointer) 3486 * as our counter. 3487 */ 3488 ret = kstrtoul(number, 0, &data->count); 3489 if (ret) 3490 goto out_free; 3491 3492 out_reg: 3493 /* Don't let event modules unload while probe registered */ 3494 ret = trace_event_try_get_ref(file->event_call); 3495 if (!ret) { 3496 ret = -EBUSY; 3497 goto out_free; 3498 } 3499 3500 ret = __ftrace_event_enable_disable(file, 1, 1); 3501 if (ret < 0) 3502 goto out_put; 3503 3504 ret = register_ftrace_function_probe(glob, tr, ops, data); 3505 /* 3506 * The above returns on success the # of functions enabled, 3507 * but if it didn't find any functions it returns zero. 3508 * Consider no functions a failure too. 3509 */ 3510 if (!ret) { 3511 ret = -ENOENT; 3512 goto out_disable; 3513 } else if (ret < 0) 3514 goto out_disable; 3515 /* Just return zero, not the number of enabled functions */ 3516 ret = 0; 3517 out: 3518 mutex_unlock(&event_mutex); 3519 return ret; 3520 3521 out_disable: 3522 __ftrace_event_enable_disable(file, 0, 1); 3523 out_put: 3524 trace_event_put_ref(file->event_call); 3525 out_free: 3526 kfree(data); 3527 goto out; 3528 } 3529 3530 static struct ftrace_func_command event_enable_cmd = { 3531 .name = ENABLE_EVENT_STR, 3532 .func = event_enable_func, 3533 }; 3534 3535 static struct ftrace_func_command event_disable_cmd = { 3536 .name = DISABLE_EVENT_STR, 3537 .func = event_enable_func, 3538 }; 3539 3540 static __init int register_event_cmds(void) 3541 { 3542 int ret; 3543 3544 ret = register_ftrace_command(&event_enable_cmd); 3545 if (WARN_ON(ret < 0)) 3546 return ret; 3547 ret = register_ftrace_command(&event_disable_cmd); 3548 if (WARN_ON(ret < 0)) 3549 unregister_ftrace_command(&event_enable_cmd); 3550 return ret; 3551 } 3552 #else 3553 static inline int register_event_cmds(void) { return 0; } 3554 #endif /* CONFIG_DYNAMIC_FTRACE */ 3555 3556 /* 3557 * The top level array and trace arrays created by boot-time tracing 3558 * have already had its trace_event_file descriptors created in order 3559 * to allow for early events to be recorded. 3560 * This function is called after the tracefs has been initialized, 3561 * and we now have to create the files associated to the events. 3562 */ 3563 static void __trace_early_add_event_dirs(struct trace_array *tr) 3564 { 3565 struct trace_event_file *file; 3566 int ret; 3567 3568 3569 list_for_each_entry(file, &tr->events, list) { 3570 ret = event_create_dir(tr->event_dir, file); 3571 if (ret < 0) 3572 pr_warn("Could not create directory for event %s\n", 3573 trace_event_name(file->event_call)); 3574 } 3575 } 3576 3577 /* 3578 * For early boot up, the top trace array and the trace arrays created 3579 * by boot-time tracing require to have a list of events that can be 3580 * enabled. This must be done before the filesystem is set up in order 3581 * to allow events to be traced early. 3582 */ 3583 void __trace_early_add_events(struct trace_array *tr) 3584 { 3585 struct trace_event_call *call; 3586 int ret; 3587 3588 list_for_each_entry(call, &ftrace_events, list) { 3589 /* Early boot up should not have any modules loaded */ 3590 if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) && 3591 WARN_ON_ONCE(call->module)) 3592 continue; 3593 3594 ret = __trace_early_add_new_event(call, tr); 3595 if (ret < 0) 3596 pr_warn("Could not create early event %s\n", 3597 trace_event_name(call)); 3598 } 3599 } 3600 3601 /* Remove the event directory structure for a trace directory. */ 3602 static void 3603 __trace_remove_event_dirs(struct trace_array *tr) 3604 { 3605 struct trace_event_file *file, *next; 3606 3607 list_for_each_entry_safe(file, next, &tr->events, list) 3608 remove_event_file_dir(file); 3609 } 3610 3611 static void __add_event_to_tracers(struct trace_event_call *call) 3612 { 3613 struct trace_array *tr; 3614 3615 list_for_each_entry(tr, &ftrace_trace_arrays, list) 3616 __trace_add_new_event(call, tr); 3617 } 3618 3619 extern struct trace_event_call *__start_ftrace_events[]; 3620 extern struct trace_event_call *__stop_ftrace_events[]; 3621 3622 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; 3623 3624 static __init int setup_trace_event(char *str) 3625 { 3626 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE); 3627 ring_buffer_expanded = true; 3628 disable_tracing_selftest("running event tracing"); 3629 3630 return 1; 3631 } 3632 __setup("trace_event=", setup_trace_event); 3633 3634 /* Expects to have event_mutex held when called */ 3635 static int 3636 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr) 3637 { 3638 struct dentry *d_events; 3639 struct dentry *entry; 3640 3641 entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent, 3642 tr, &ftrace_set_event_fops); 3643 if (!entry) 3644 return -ENOMEM; 3645 3646 d_events = tracefs_create_dir("events", parent); 3647 if (!d_events) { 3648 pr_warn("Could not create tracefs 'events' directory\n"); 3649 return -ENOMEM; 3650 } 3651 3652 entry = trace_create_file("enable", TRACE_MODE_WRITE, d_events, 3653 tr, &ftrace_tr_enable_fops); 3654 if (!entry) 3655 return -ENOMEM; 3656 3657 /* There are not as crucial, just warn if they are not created */ 3658 3659 trace_create_file("set_event_pid", TRACE_MODE_WRITE, parent, 3660 tr, &ftrace_set_event_pid_fops); 3661 3662 trace_create_file("set_event_notrace_pid", 3663 TRACE_MODE_WRITE, parent, tr, 3664 &ftrace_set_event_notrace_pid_fops); 3665 3666 /* ring buffer internal formats */ 3667 trace_create_file("header_page", TRACE_MODE_READ, d_events, 3668 ring_buffer_print_page_header, 3669 &ftrace_show_header_fops); 3670 3671 trace_create_file("header_event", TRACE_MODE_READ, d_events, 3672 ring_buffer_print_entry_header, 3673 &ftrace_show_header_fops); 3674 3675 tr->event_dir = d_events; 3676 3677 return 0; 3678 } 3679 3680 /** 3681 * event_trace_add_tracer - add a instance of a trace_array to events 3682 * @parent: The parent dentry to place the files/directories for events in 3683 * @tr: The trace array associated with these events 3684 * 3685 * When a new instance is created, it needs to set up its events 3686 * directory, as well as other files associated with events. It also 3687 * creates the event hierarchy in the @parent/events directory. 3688 * 3689 * Returns 0 on success. 3690 * 3691 * Must be called with event_mutex held. 3692 */ 3693 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr) 3694 { 3695 int ret; 3696 3697 lockdep_assert_held(&event_mutex); 3698 3699 ret = create_event_toplevel_files(parent, tr); 3700 if (ret) 3701 goto out; 3702 3703 down_write(&trace_event_sem); 3704 /* If tr already has the event list, it is initialized in early boot. */ 3705 if (unlikely(!list_empty(&tr->events))) 3706 __trace_early_add_event_dirs(tr); 3707 else 3708 __trace_add_event_dirs(tr); 3709 up_write(&trace_event_sem); 3710 3711 out: 3712 return ret; 3713 } 3714 3715 /* 3716 * The top trace array already had its file descriptors created. 3717 * Now the files themselves need to be created. 3718 */ 3719 static __init int 3720 early_event_add_tracer(struct dentry *parent, struct trace_array *tr) 3721 { 3722 int ret; 3723 3724 mutex_lock(&event_mutex); 3725 3726 ret = create_event_toplevel_files(parent, tr); 3727 if (ret) 3728 goto out_unlock; 3729 3730 down_write(&trace_event_sem); 3731 __trace_early_add_event_dirs(tr); 3732 up_write(&trace_event_sem); 3733 3734 out_unlock: 3735 mutex_unlock(&event_mutex); 3736 3737 return ret; 3738 } 3739 3740 /* Must be called with event_mutex held */ 3741 int event_trace_del_tracer(struct trace_array *tr) 3742 { 3743 lockdep_assert_held(&event_mutex); 3744 3745 /* Disable any event triggers and associated soft-disabled events */ 3746 clear_event_triggers(tr); 3747 3748 /* Clear the pid list */ 3749 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS); 3750 3751 /* Disable any running events */ 3752 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 3753 3754 /* Make sure no more events are being executed */ 3755 tracepoint_synchronize_unregister(); 3756 3757 down_write(&trace_event_sem); 3758 __trace_remove_event_dirs(tr); 3759 tracefs_remove(tr->event_dir); 3760 up_write(&trace_event_sem); 3761 3762 tr->event_dir = NULL; 3763 3764 return 0; 3765 } 3766 3767 static __init int event_trace_memsetup(void) 3768 { 3769 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); 3770 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC); 3771 return 0; 3772 } 3773 3774 static __init void 3775 early_enable_events(struct trace_array *tr, bool disable_first) 3776 { 3777 char *buf = bootup_event_buf; 3778 char *token; 3779 int ret; 3780 3781 while (true) { 3782 token = strsep(&buf, ","); 3783 3784 if (!token) 3785 break; 3786 3787 if (*token) { 3788 /* Restarting syscalls requires that we stop them first */ 3789 if (disable_first) 3790 ftrace_set_clr_event(tr, token, 0); 3791 3792 ret = ftrace_set_clr_event(tr, token, 1); 3793 if (ret) 3794 pr_warn("Failed to enable trace event: %s\n", token); 3795 } 3796 3797 /* Put back the comma to allow this to be called again */ 3798 if (buf) 3799 *(buf - 1) = ','; 3800 } 3801 } 3802 3803 static __init int event_trace_enable(void) 3804 { 3805 struct trace_array *tr = top_trace_array(); 3806 struct trace_event_call **iter, *call; 3807 int ret; 3808 3809 if (!tr) 3810 return -ENODEV; 3811 3812 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { 3813 3814 call = *iter; 3815 ret = event_init(call); 3816 if (!ret) 3817 list_add(&call->list, &ftrace_events); 3818 } 3819 3820 register_trigger_cmds(); 3821 3822 /* 3823 * We need the top trace array to have a working set of trace 3824 * points at early init, before the debug files and directories 3825 * are created. Create the file entries now, and attach them 3826 * to the actual file dentries later. 3827 */ 3828 __trace_early_add_events(tr); 3829 3830 early_enable_events(tr, false); 3831 3832 trace_printk_start_comm(); 3833 3834 register_event_cmds(); 3835 3836 3837 return 0; 3838 } 3839 3840 /* 3841 * event_trace_enable() is called from trace_event_init() first to 3842 * initialize events and perhaps start any events that are on the 3843 * command line. Unfortunately, there are some events that will not 3844 * start this early, like the system call tracepoints that need 3845 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But 3846 * event_trace_enable() is called before pid 1 starts, and this flag 3847 * is never set, making the syscall tracepoint never get reached, but 3848 * the event is enabled regardless (and not doing anything). 3849 */ 3850 static __init int event_trace_enable_again(void) 3851 { 3852 struct trace_array *tr; 3853 3854 tr = top_trace_array(); 3855 if (!tr) 3856 return -ENODEV; 3857 3858 early_enable_events(tr, true); 3859 3860 return 0; 3861 } 3862 3863 early_initcall(event_trace_enable_again); 3864 3865 /* Init fields which doesn't related to the tracefs */ 3866 static __init int event_trace_init_fields(void) 3867 { 3868 if (trace_define_generic_fields()) 3869 pr_warn("tracing: Failed to allocated generic fields"); 3870 3871 if (trace_define_common_fields()) 3872 pr_warn("tracing: Failed to allocate common fields"); 3873 3874 return 0; 3875 } 3876 3877 __init int event_trace_init(void) 3878 { 3879 struct trace_array *tr; 3880 int ret; 3881 3882 tr = top_trace_array(); 3883 if (!tr) 3884 return -ENODEV; 3885 3886 trace_create_file("available_events", TRACE_MODE_READ, 3887 NULL, tr, &ftrace_avail_fops); 3888 3889 ret = early_event_add_tracer(NULL, tr); 3890 if (ret) 3891 return ret; 3892 3893 #ifdef CONFIG_MODULES 3894 ret = register_module_notifier(&trace_module_nb); 3895 if (ret) 3896 pr_warn("Failed to register trace events module notifier\n"); 3897 #endif 3898 3899 eventdir_initialized = true; 3900 3901 return 0; 3902 } 3903 3904 void __init trace_event_init(void) 3905 { 3906 event_trace_memsetup(); 3907 init_ftrace_syscalls(); 3908 event_trace_enable(); 3909 event_trace_init_fields(); 3910 } 3911 3912 #ifdef CONFIG_EVENT_TRACE_STARTUP_TEST 3913 3914 static DEFINE_SPINLOCK(test_spinlock); 3915 static DEFINE_SPINLOCK(test_spinlock_irq); 3916 static DEFINE_MUTEX(test_mutex); 3917 3918 static __init void test_work(struct work_struct *dummy) 3919 { 3920 spin_lock(&test_spinlock); 3921 spin_lock_irq(&test_spinlock_irq); 3922 udelay(1); 3923 spin_unlock_irq(&test_spinlock_irq); 3924 spin_unlock(&test_spinlock); 3925 3926 mutex_lock(&test_mutex); 3927 msleep(1); 3928 mutex_unlock(&test_mutex); 3929 } 3930 3931 static __init int event_test_thread(void *unused) 3932 { 3933 void *test_malloc; 3934 3935 test_malloc = kmalloc(1234, GFP_KERNEL); 3936 if (!test_malloc) 3937 pr_info("failed to kmalloc\n"); 3938 3939 schedule_on_each_cpu(test_work); 3940 3941 kfree(test_malloc); 3942 3943 set_current_state(TASK_INTERRUPTIBLE); 3944 while (!kthread_should_stop()) { 3945 schedule(); 3946 set_current_state(TASK_INTERRUPTIBLE); 3947 } 3948 __set_current_state(TASK_RUNNING); 3949 3950 return 0; 3951 } 3952 3953 /* 3954 * Do various things that may trigger events. 3955 */ 3956 static __init void event_test_stuff(void) 3957 { 3958 struct task_struct *test_thread; 3959 3960 test_thread = kthread_run(event_test_thread, NULL, "test-events"); 3961 msleep(1); 3962 kthread_stop(test_thread); 3963 } 3964 3965 /* 3966 * For every trace event defined, we will test each trace point separately, 3967 * and then by groups, and finally all trace points. 3968 */ 3969 static __init void event_trace_self_tests(void) 3970 { 3971 struct trace_subsystem_dir *dir; 3972 struct trace_event_file *file; 3973 struct trace_event_call *call; 3974 struct event_subsystem *system; 3975 struct trace_array *tr; 3976 int ret; 3977 3978 tr = top_trace_array(); 3979 if (!tr) 3980 return; 3981 3982 pr_info("Running tests on trace events:\n"); 3983 3984 list_for_each_entry(file, &tr->events, list) { 3985 3986 call = file->event_call; 3987 3988 /* Only test those that have a probe */ 3989 if (!call->class || !call->class->probe) 3990 continue; 3991 3992 /* 3993 * Testing syscall events here is pretty useless, but 3994 * we still do it if configured. But this is time consuming. 3995 * What we really need is a user thread to perform the 3996 * syscalls as we test. 3997 */ 3998 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 3999 if (call->class->system && 4000 strcmp(call->class->system, "syscalls") == 0) 4001 continue; 4002 #endif 4003 4004 pr_info("Testing event %s: ", trace_event_name(call)); 4005 4006 /* 4007 * If an event is already enabled, someone is using 4008 * it and the self test should not be on. 4009 */ 4010 if (file->flags & EVENT_FILE_FL_ENABLED) { 4011 pr_warn("Enabled event during self test!\n"); 4012 WARN_ON_ONCE(1); 4013 continue; 4014 } 4015 4016 ftrace_event_enable_disable(file, 1); 4017 event_test_stuff(); 4018 ftrace_event_enable_disable(file, 0); 4019 4020 pr_cont("OK\n"); 4021 } 4022 4023 /* Now test at the sub system level */ 4024 4025 pr_info("Running tests on trace event systems:\n"); 4026 4027 list_for_each_entry(dir, &tr->systems, list) { 4028 4029 system = dir->subsystem; 4030 4031 /* the ftrace system is special, skip it */ 4032 if (strcmp(system->name, "ftrace") == 0) 4033 continue; 4034 4035 pr_info("Testing event system %s: ", system->name); 4036 4037 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); 4038 if (WARN_ON_ONCE(ret)) { 4039 pr_warn("error enabling system %s\n", 4040 system->name); 4041 continue; 4042 } 4043 4044 event_test_stuff(); 4045 4046 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); 4047 if (WARN_ON_ONCE(ret)) { 4048 pr_warn("error disabling system %s\n", 4049 system->name); 4050 continue; 4051 } 4052 4053 pr_cont("OK\n"); 4054 } 4055 4056 /* Test with all events enabled */ 4057 4058 pr_info("Running tests on all trace events:\n"); 4059 pr_info("Testing all events: "); 4060 4061 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); 4062 if (WARN_ON_ONCE(ret)) { 4063 pr_warn("error enabling all events\n"); 4064 return; 4065 } 4066 4067 event_test_stuff(); 4068 4069 /* reset sysname */ 4070 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); 4071 if (WARN_ON_ONCE(ret)) { 4072 pr_warn("error disabling all events\n"); 4073 return; 4074 } 4075 4076 pr_cont("OK\n"); 4077 } 4078 4079 #ifdef CONFIG_FUNCTION_TRACER 4080 4081 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 4082 4083 static struct trace_event_file event_trace_file __initdata; 4084 4085 static void __init 4086 function_test_events_call(unsigned long ip, unsigned long parent_ip, 4087 struct ftrace_ops *op, struct ftrace_regs *regs) 4088 { 4089 struct trace_buffer *buffer; 4090 struct ring_buffer_event *event; 4091 struct ftrace_entry *entry; 4092 unsigned int trace_ctx; 4093 long disabled; 4094 int cpu; 4095 4096 trace_ctx = tracing_gen_ctx(); 4097 preempt_disable_notrace(); 4098 cpu = raw_smp_processor_id(); 4099 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); 4100 4101 if (disabled != 1) 4102 goto out; 4103 4104 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file, 4105 TRACE_FN, sizeof(*entry), 4106 trace_ctx); 4107 if (!event) 4108 goto out; 4109 entry = ring_buffer_event_data(event); 4110 entry->ip = ip; 4111 entry->parent_ip = parent_ip; 4112 4113 event_trigger_unlock_commit(&event_trace_file, buffer, event, 4114 entry, trace_ctx); 4115 out: 4116 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 4117 preempt_enable_notrace(); 4118 } 4119 4120 static struct ftrace_ops trace_ops __initdata = 4121 { 4122 .func = function_test_events_call, 4123 }; 4124 4125 static __init void event_trace_self_test_with_function(void) 4126 { 4127 int ret; 4128 4129 event_trace_file.tr = top_trace_array(); 4130 if (WARN_ON(!event_trace_file.tr)) 4131 return; 4132 4133 ret = register_ftrace_function(&trace_ops); 4134 if (WARN_ON(ret < 0)) { 4135 pr_info("Failed to enable function tracer for event tests\n"); 4136 return; 4137 } 4138 pr_info("Running tests again, along with the function tracer\n"); 4139 event_trace_self_tests(); 4140 unregister_ftrace_function(&trace_ops); 4141 } 4142 #else 4143 static __init void event_trace_self_test_with_function(void) 4144 { 4145 } 4146 #endif 4147 4148 static __init int event_trace_self_tests_init(void) 4149 { 4150 if (!tracing_selftest_disabled) { 4151 event_trace_self_tests(); 4152 event_trace_self_test_with_function(); 4153 } 4154 4155 return 0; 4156 } 4157 4158 late_initcall(event_trace_self_tests_init); 4159 4160 #endif 4161