/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

#define SYSTEM_FL_FREE_NAME		(1 << 31)

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct ftrace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	field = __find_event_field(&ftrace_common_fields, name);
	if (field)
		return field;

	head = trace_get_fields(call);
	return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head =
trace_get_fields(call); 140 return __trace_define_field(head, type, name, offset, size, 141 is_signed, filter_type); 142 } 143 EXPORT_SYMBOL_GPL(trace_define_field); 144 145 #define __common_field(type, item) \ 146 ret = __trace_define_field(&ftrace_common_fields, #type, \ 147 "common_" #item, \ 148 offsetof(typeof(ent), item), \ 149 sizeof(ent.item), \ 150 is_signed_type(type), FILTER_OTHER); \ 151 if (ret) \ 152 return ret; 153 154 static int trace_define_common_fields(void) 155 { 156 int ret; 157 struct trace_entry ent; 158 159 __common_field(unsigned short, type); 160 __common_field(unsigned char, flags); 161 __common_field(unsigned char, preempt_count); 162 __common_field(int, pid); 163 164 return ret; 165 } 166 167 static void trace_destroy_fields(struct ftrace_event_call *call) 168 { 169 struct ftrace_event_field *field, *next; 170 struct list_head *head; 171 172 head = trace_get_fields(call); 173 list_for_each_entry_safe(field, next, head, link) { 174 list_del(&field->link); 175 kmem_cache_free(field_cachep, field); 176 } 177 } 178 179 int trace_event_raw_init(struct ftrace_event_call *call) 180 { 181 int id; 182 183 id = register_ftrace_event(&call->event); 184 if (!id) 185 return -ENODEV; 186 187 return 0; 188 } 189 EXPORT_SYMBOL_GPL(trace_event_raw_init); 190 191 void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, 192 struct ftrace_event_file *ftrace_file, 193 unsigned long len) 194 { 195 struct ftrace_event_call *event_call = ftrace_file->event_call; 196 197 local_save_flags(fbuffer->flags); 198 fbuffer->pc = preempt_count(); 199 fbuffer->ftrace_file = ftrace_file; 200 201 fbuffer->event = 202 trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, 203 event_call->event.type, len, 204 fbuffer->flags, fbuffer->pc); 205 if (!fbuffer->event) 206 return NULL; 207 208 fbuffer->entry = ring_buffer_event_data(fbuffer->event); 209 return fbuffer->entry; 210 } 211 EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); 212 213 void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) 214 { 215 event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, 216 fbuffer->event, fbuffer->entry, 217 fbuffer->flags, fbuffer->pc); 218 } 219 EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); 220 221 int ftrace_event_reg(struct ftrace_event_call *call, 222 enum trace_reg type, void *data) 223 { 224 struct ftrace_event_file *file = data; 225 226 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); 227 switch (type) { 228 case TRACE_REG_REGISTER: 229 return tracepoint_probe_register(call->tp, 230 call->class->probe, 231 file); 232 case TRACE_REG_UNREGISTER: 233 tracepoint_probe_unregister(call->tp, 234 call->class->probe, 235 file); 236 return 0; 237 238 #ifdef CONFIG_PERF_EVENTS 239 case TRACE_REG_PERF_REGISTER: 240 return tracepoint_probe_register(call->tp, 241 call->class->perf_probe, 242 call); 243 case TRACE_REG_PERF_UNREGISTER: 244 tracepoint_probe_unregister(call->tp, 245 call->class->perf_probe, 246 call); 247 return 0; 248 case TRACE_REG_PERF_OPEN: 249 case TRACE_REG_PERF_CLOSE: 250 case TRACE_REG_PERF_ADD: 251 case TRACE_REG_PERF_DEL: 252 return 0; 253 #endif 254 } 255 return 0; 256 } 257 EXPORT_SYMBOL_GPL(ftrace_event_reg); 258 259 void trace_event_enable_cmd_record(bool enable) 260 { 261 struct ftrace_event_file *file; 262 struct trace_array *tr; 263 264 mutex_lock(&event_mutex); 265 do_for_each_event_file(tr, file) { 266 267 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) 268 continue; 269 270 if (enable) { 271 tracing_start_cmdline_record(); 272 
set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 273 } else { 274 tracing_stop_cmdline_record(); 275 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 276 } 277 } while_for_each_event_file(); 278 mutex_unlock(&event_mutex); 279 } 280 281 static int __ftrace_event_enable_disable(struct ftrace_event_file *file, 282 int enable, int soft_disable) 283 { 284 struct ftrace_event_call *call = file->event_call; 285 int ret = 0; 286 int disable; 287 288 switch (enable) { 289 case 0: 290 /* 291 * When soft_disable is set and enable is cleared, the sm_ref 292 * reference counter is decremented. If it reaches 0, we want 293 * to clear the SOFT_DISABLED flag but leave the event in the 294 * state that it was. That is, if the event was enabled and 295 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED 296 * is set we do not want the event to be enabled before we 297 * clear the bit. 298 * 299 * When soft_disable is not set but the SOFT_MODE flag is, 300 * we do nothing. Do not disable the tracepoint, otherwise 301 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work. 302 */ 303 if (soft_disable) { 304 if (atomic_dec_return(&file->sm_ref) > 0) 305 break; 306 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; 307 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 308 } else 309 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); 310 311 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { 312 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 313 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { 314 tracing_stop_cmdline_record(); 315 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 316 } 317 call->class->reg(call, TRACE_REG_UNREGISTER, file); 318 } 319 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 320 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) 321 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 322 else 323 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 324 break; 325 case 1: 326 /* 327 * When soft_disable is set and enable is set, we want to 328 * register the tracepoint for the event, but leave the event 329 * as is. That means, if the event was already enabled, we do 330 * nothing (but set SOFT_MODE). If the event is disabled, we 331 * set SOFT_DISABLED before enabling the event tracepoint, so 332 * it still seems to be disabled. 333 */ 334 if (!soft_disable) 335 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 336 else { 337 if (atomic_inc_return(&file->sm_ref) > 1) 338 break; 339 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 340 } 341 342 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { 343 344 /* Keep the event disabled, when going to SOFT_MODE. */ 345 if (soft_disable) 346 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 347 348 if (trace_flags & TRACE_ITER_RECORD_CMD) { 349 tracing_start_cmdline_record(); 350 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 351 } 352 ret = call->class->reg(call, TRACE_REG_REGISTER, file); 353 if (ret) { 354 tracing_stop_cmdline_record(); 355 pr_info("event trace: Could not enable event " 356 "%s\n", ftrace_event_name(call)); 357 break; 358 } 359 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 360 361 /* WAS_ENABLED gets set but never cleared. 
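			 * It is used at module unload time by
			 * trace_module_remove_events() to decide whether the
			 * ring buffer needs to be reset.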
*/ 362 call->flags |= TRACE_EVENT_FL_WAS_ENABLED; 363 } 364 break; 365 } 366 367 return ret; 368 } 369 370 int trace_event_enable_disable(struct ftrace_event_file *file, 371 int enable, int soft_disable) 372 { 373 return __ftrace_event_enable_disable(file, enable, soft_disable); 374 } 375 376 static int ftrace_event_enable_disable(struct ftrace_event_file *file, 377 int enable) 378 { 379 return __ftrace_event_enable_disable(file, enable, 0); 380 } 381 382 static void ftrace_clear_events(struct trace_array *tr) 383 { 384 struct ftrace_event_file *file; 385 386 mutex_lock(&event_mutex); 387 list_for_each_entry(file, &tr->events, list) { 388 ftrace_event_enable_disable(file, 0); 389 } 390 mutex_unlock(&event_mutex); 391 } 392 393 static void __put_system(struct event_subsystem *system) 394 { 395 struct event_filter *filter = system->filter; 396 397 WARN_ON_ONCE(system_refcount(system) == 0); 398 if (system_refcount_dec(system)) 399 return; 400 401 list_del(&system->list); 402 403 if (filter) { 404 kfree(filter->filter_string); 405 kfree(filter); 406 } 407 if (system->ref_count & SYSTEM_FL_FREE_NAME) 408 kfree(system->name); 409 kfree(system); 410 } 411 412 static void __get_system(struct event_subsystem *system) 413 { 414 WARN_ON_ONCE(system_refcount(system) == 0); 415 system_refcount_inc(system); 416 } 417 418 static void __get_system_dir(struct ftrace_subsystem_dir *dir) 419 { 420 WARN_ON_ONCE(dir->ref_count == 0); 421 dir->ref_count++; 422 __get_system(dir->subsystem); 423 } 424 425 static void __put_system_dir(struct ftrace_subsystem_dir *dir) 426 { 427 WARN_ON_ONCE(dir->ref_count == 0); 428 /* If the subsystem is about to be freed, the dir must be too */ 429 WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1); 430 431 __put_system(dir->subsystem); 432 if (!--dir->ref_count) 433 kfree(dir); 434 } 435 436 static void put_system(struct ftrace_subsystem_dir *dir) 437 { 438 mutex_lock(&event_mutex); 439 __put_system_dir(dir); 440 mutex_unlock(&event_mutex); 441 } 442 443 static void remove_subsystem(struct ftrace_subsystem_dir *dir) 444 { 445 if (!dir) 446 return; 447 448 if (!--dir->nr_events) { 449 debugfs_remove_recursive(dir->entry); 450 list_del(&dir->list); 451 __put_system_dir(dir); 452 } 453 } 454 455 static void remove_event_file_dir(struct ftrace_event_file *file) 456 { 457 struct dentry *dir = file->dir; 458 struct dentry *child; 459 460 if (dir) { 461 spin_lock(&dir->d_lock); /* probably unneeded */ 462 list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { 463 if (child->d_inode) /* probably unneeded */ 464 child->d_inode->i_private = NULL; 465 } 466 spin_unlock(&dir->d_lock); 467 468 debugfs_remove_recursive(dir); 469 } 470 471 list_del(&file->list); 472 remove_subsystem(file->system); 473 kmem_cache_free(file_cachep, file); 474 } 475 476 /* 477 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events. 
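 *
 * A non-NULL @match is compared against both the event name and the
 * subsystem name, while a non-NULL @sub or @event must match exactly.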
478 */ 479 static int 480 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, 481 const char *sub, const char *event, int set) 482 { 483 struct ftrace_event_file *file; 484 struct ftrace_event_call *call; 485 const char *name; 486 int ret = -EINVAL; 487 488 list_for_each_entry(file, &tr->events, list) { 489 490 call = file->event_call; 491 name = ftrace_event_name(call); 492 493 if (!name || !call->class || !call->class->reg) 494 continue; 495 496 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 497 continue; 498 499 if (match && 500 strcmp(match, name) != 0 && 501 strcmp(match, call->class->system) != 0) 502 continue; 503 504 if (sub && strcmp(sub, call->class->system) != 0) 505 continue; 506 507 if (event && strcmp(event, name) != 0) 508 continue; 509 510 ftrace_event_enable_disable(file, set); 511 512 ret = 0; 513 } 514 515 return ret; 516 } 517 518 static int __ftrace_set_clr_event(struct trace_array *tr, const char *match, 519 const char *sub, const char *event, int set) 520 { 521 int ret; 522 523 mutex_lock(&event_mutex); 524 ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set); 525 mutex_unlock(&event_mutex); 526 527 return ret; 528 } 529 530 static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 531 { 532 char *event = NULL, *sub = NULL, *match; 533 534 /* 535 * The buf format can be <subsystem>:<event-name> 536 * *:<event-name> means any event by that name. 537 * :<event-name> is the same. 538 * 539 * <subsystem>:* means all events in that subsystem 540 * <subsystem>: means the same. 541 * 542 * <name> (no ':') means all events in a subsystem with 543 * the name <name> or any event that matches <name> 544 */ 545 546 match = strsep(&buf, ":"); 547 if (buf) { 548 sub = match; 549 event = buf; 550 match = NULL; 551 552 if (!strlen(sub) || strcmp(sub, "*") == 0) 553 sub = NULL; 554 if (!strlen(event) || strcmp(event, "*") == 0) 555 event = NULL; 556 } 557 558 return __ftrace_set_clr_event(tr, match, sub, event, set); 559 } 560 561 /** 562 * trace_set_clr_event - enable or disable an event 563 * @system: system name to match (NULL for any system) 564 * @event: event name to match (NULL for all events, within system) 565 * @set: 1 to enable, 0 to disable 566 * 567 * This is a way for other parts of the kernel to enable or disable 568 * event recording. 569 * 570 * Returns 0 on success, -EINVAL if the parameters do not match any 571 * registered events. 
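 *
 * For example (illustrative only), enabling every event in the "sched"
 * subsystem from kernel code would look like:
 *
 *	trace_set_clr_event("sched", NULL, 1);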
572 */ 573 int trace_set_clr_event(const char *system, const char *event, int set) 574 { 575 struct trace_array *tr = top_trace_array(); 576 577 return __ftrace_set_clr_event(tr, NULL, system, event, set); 578 } 579 EXPORT_SYMBOL_GPL(trace_set_clr_event); 580 581 /* 128 should be much more than enough */ 582 #define EVENT_BUF_SIZE 127 583 584 static ssize_t 585 ftrace_event_write(struct file *file, const char __user *ubuf, 586 size_t cnt, loff_t *ppos) 587 { 588 struct trace_parser parser; 589 struct seq_file *m = file->private_data; 590 struct trace_array *tr = m->private; 591 ssize_t read, ret; 592 593 if (!cnt) 594 return 0; 595 596 ret = tracing_update_buffers(); 597 if (ret < 0) 598 return ret; 599 600 if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) 601 return -ENOMEM; 602 603 read = trace_get_user(&parser, ubuf, cnt, ppos); 604 605 if (read >= 0 && trace_parser_loaded((&parser))) { 606 int set = 1; 607 608 if (*parser.buffer == '!') 609 set = 0; 610 611 parser.buffer[parser.idx] = 0; 612 613 ret = ftrace_set_clr_event(tr, parser.buffer + !set, set); 614 if (ret) 615 goto out_put; 616 } 617 618 ret = read; 619 620 out_put: 621 trace_parser_put(&parser); 622 623 return ret; 624 } 625 626 static void * 627 t_next(struct seq_file *m, void *v, loff_t *pos) 628 { 629 struct ftrace_event_file *file = v; 630 struct ftrace_event_call *call; 631 struct trace_array *tr = m->private; 632 633 (*pos)++; 634 635 list_for_each_entry_continue(file, &tr->events, list) { 636 call = file->event_call; 637 /* 638 * The ftrace subsystem is for showing formats only. 639 * They can not be enabled or disabled via the event files. 640 */ 641 if (call->class && call->class->reg) 642 return file; 643 } 644 645 return NULL; 646 } 647 648 static void *t_start(struct seq_file *m, loff_t *pos) 649 { 650 struct ftrace_event_file *file; 651 struct trace_array *tr = m->private; 652 loff_t l; 653 654 mutex_lock(&event_mutex); 655 656 file = list_entry(&tr->events, struct ftrace_event_file, list); 657 for (l = 0; l <= *pos; ) { 658 file = t_next(m, file, &l); 659 if (!file) 660 break; 661 } 662 return file; 663 } 664 665 static void * 666 s_next(struct seq_file *m, void *v, loff_t *pos) 667 { 668 struct ftrace_event_file *file = v; 669 struct trace_array *tr = m->private; 670 671 (*pos)++; 672 673 list_for_each_entry_continue(file, &tr->events, list) { 674 if (file->flags & FTRACE_EVENT_FL_ENABLED) 675 return file; 676 } 677 678 return NULL; 679 } 680 681 static void *s_start(struct seq_file *m, loff_t *pos) 682 { 683 struct ftrace_event_file *file; 684 struct trace_array *tr = m->private; 685 loff_t l; 686 687 mutex_lock(&event_mutex); 688 689 file = list_entry(&tr->events, struct ftrace_event_file, list); 690 for (l = 0; l <= *pos; ) { 691 file = s_next(m, file, &l); 692 if (!file) 693 break; 694 } 695 return file; 696 } 697 698 static int t_show(struct seq_file *m, void *v) 699 { 700 struct ftrace_event_file *file = v; 701 struct ftrace_event_call *call = file->event_call; 702 703 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 704 seq_printf(m, "%s:", call->class->system); 705 seq_printf(m, "%s\n", ftrace_event_name(call)); 706 707 return 0; 708 } 709 710 static void t_stop(struct seq_file *m, void *p) 711 { 712 mutex_unlock(&event_mutex); 713 } 714 715 static ssize_t 716 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 717 loff_t *ppos) 718 { 719 struct ftrace_event_file *file; 720 unsigned long flags; 721 char buf[4] = "0"; 722 723 mutex_lock(&event_mutex); 724 file = 
event_file_data(filp); 725 if (likely(file)) 726 flags = file->flags; 727 mutex_unlock(&event_mutex); 728 729 if (!file) 730 return -ENODEV; 731 732 if (flags & FTRACE_EVENT_FL_ENABLED && 733 !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 734 strcpy(buf, "1"); 735 736 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || 737 flags & FTRACE_EVENT_FL_SOFT_MODE) 738 strcat(buf, "*"); 739 740 strcat(buf, "\n"); 741 742 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf)); 743 } 744 745 static ssize_t 746 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 747 loff_t *ppos) 748 { 749 struct ftrace_event_file *file; 750 unsigned long val; 751 int ret; 752 753 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 754 if (ret) 755 return ret; 756 757 ret = tracing_update_buffers(); 758 if (ret < 0) 759 return ret; 760 761 switch (val) { 762 case 0: 763 case 1: 764 ret = -ENODEV; 765 mutex_lock(&event_mutex); 766 file = event_file_data(filp); 767 if (likely(file)) 768 ret = ftrace_event_enable_disable(file, val); 769 mutex_unlock(&event_mutex); 770 break; 771 772 default: 773 return -EINVAL; 774 } 775 776 *ppos += cnt; 777 778 return ret ? ret : cnt; 779 } 780 781 static ssize_t 782 system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 783 loff_t *ppos) 784 { 785 const char set_to_char[4] = { '?', '0', '1', 'X' }; 786 struct ftrace_subsystem_dir *dir = filp->private_data; 787 struct event_subsystem *system = dir->subsystem; 788 struct ftrace_event_call *call; 789 struct ftrace_event_file *file; 790 struct trace_array *tr = dir->tr; 791 char buf[2]; 792 int set = 0; 793 int ret; 794 795 mutex_lock(&event_mutex); 796 list_for_each_entry(file, &tr->events, list) { 797 call = file->event_call; 798 if (!ftrace_event_name(call) || !call->class || !call->class->reg) 799 continue; 800 801 if (system && strcmp(call->class->system, system->name) != 0) 802 continue; 803 804 /* 805 * We need to find out if all the events are set 806 * or if all events or cleared, or if we have 807 * a mixture. 808 */ 809 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); 810 811 /* 812 * If we have a mixture, no need to look further. 813 */ 814 if (set == 3) 815 break; 816 } 817 mutex_unlock(&event_mutex); 818 819 buf[0] = set_to_char[set]; 820 buf[1] = '\n'; 821 822 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); 823 824 return ret; 825 } 826 827 static ssize_t 828 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 829 loff_t *ppos) 830 { 831 struct ftrace_subsystem_dir *dir = filp->private_data; 832 struct event_subsystem *system = dir->subsystem; 833 const char *name = NULL; 834 unsigned long val; 835 ssize_t ret; 836 837 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 838 if (ret) 839 return ret; 840 841 ret = tracing_update_buffers(); 842 if (ret < 0) 843 return ret; 844 845 if (val != 0 && val != 1) 846 return -EINVAL; 847 848 /* 849 * Opening of "enable" adds a ref count to system, 850 * so the name is safe to use. 
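	 *
	 * When this file was opened through system_tr_open() there is no
	 * subsystem at all; name then stays NULL and the write applies to
	 * every event in the trace array.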
851 */ 852 if (system) 853 name = system->name; 854 855 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val); 856 if (ret) 857 goto out; 858 859 ret = cnt; 860 861 out: 862 *ppos += cnt; 863 864 return ret; 865 } 866 867 enum { 868 FORMAT_HEADER = 1, 869 FORMAT_FIELD_SEPERATOR = 2, 870 FORMAT_PRINTFMT = 3, 871 }; 872 873 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 874 { 875 struct ftrace_event_call *call = event_file_data(m->private); 876 struct list_head *common_head = &ftrace_common_fields; 877 struct list_head *head = trace_get_fields(call); 878 struct list_head *node = v; 879 880 (*pos)++; 881 882 switch ((unsigned long)v) { 883 case FORMAT_HEADER: 884 node = common_head; 885 break; 886 887 case FORMAT_FIELD_SEPERATOR: 888 node = head; 889 break; 890 891 case FORMAT_PRINTFMT: 892 /* all done */ 893 return NULL; 894 } 895 896 node = node->prev; 897 if (node == common_head) 898 return (void *)FORMAT_FIELD_SEPERATOR; 899 else if (node == head) 900 return (void *)FORMAT_PRINTFMT; 901 else 902 return node; 903 } 904 905 static int f_show(struct seq_file *m, void *v) 906 { 907 struct ftrace_event_call *call = event_file_data(m->private); 908 struct ftrace_event_field *field; 909 const char *array_descriptor; 910 911 switch ((unsigned long)v) { 912 case FORMAT_HEADER: 913 seq_printf(m, "name: %s\n", ftrace_event_name(call)); 914 seq_printf(m, "ID: %d\n", call->event.type); 915 seq_printf(m, "format:\n"); 916 return 0; 917 918 case FORMAT_FIELD_SEPERATOR: 919 seq_putc(m, '\n'); 920 return 0; 921 922 case FORMAT_PRINTFMT: 923 seq_printf(m, "\nprint fmt: %s\n", 924 call->print_fmt); 925 return 0; 926 } 927 928 field = list_entry(v, struct ftrace_event_field, link); 929 /* 930 * Smartly shows the array type(except dynamic array). 
931 * Normal: 932 * field:TYPE VAR 933 * If TYPE := TYPE[LEN], it is shown: 934 * field:TYPE VAR[LEN] 935 */ 936 array_descriptor = strchr(field->type, '['); 937 938 if (!strncmp(field->type, "__data_loc", 10)) 939 array_descriptor = NULL; 940 941 if (!array_descriptor) 942 seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 943 field->type, field->name, field->offset, 944 field->size, !!field->is_signed); 945 else 946 seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n", 947 (int)(array_descriptor - field->type), 948 field->type, field->name, 949 array_descriptor, field->offset, 950 field->size, !!field->is_signed); 951 952 return 0; 953 } 954 955 static void *f_start(struct seq_file *m, loff_t *pos) 956 { 957 void *p = (void *)FORMAT_HEADER; 958 loff_t l = 0; 959 960 /* ->stop() is called even if ->start() fails */ 961 mutex_lock(&event_mutex); 962 if (!event_file_data(m->private)) 963 return ERR_PTR(-ENODEV); 964 965 while (l < *pos && p) 966 p = f_next(m, p, &l); 967 968 return p; 969 } 970 971 static void f_stop(struct seq_file *m, void *p) 972 { 973 mutex_unlock(&event_mutex); 974 } 975 976 static const struct seq_operations trace_format_seq_ops = { 977 .start = f_start, 978 .next = f_next, 979 .stop = f_stop, 980 .show = f_show, 981 }; 982 983 static int trace_format_open(struct inode *inode, struct file *file) 984 { 985 struct seq_file *m; 986 int ret; 987 988 ret = seq_open(file, &trace_format_seq_ops); 989 if (ret < 0) 990 return ret; 991 992 m = file->private_data; 993 m->private = file; 994 995 return 0; 996 } 997 998 static ssize_t 999 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1000 { 1001 int id = (long)event_file_data(filp); 1002 char buf[32]; 1003 int len; 1004 1005 if (*ppos) 1006 return 0; 1007 1008 if (unlikely(!id)) 1009 return -ENODEV; 1010 1011 len = sprintf(buf, "%d\n", id); 1012 1013 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); 1014 } 1015 1016 static ssize_t 1017 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1018 loff_t *ppos) 1019 { 1020 struct ftrace_event_file *file; 1021 struct trace_seq *s; 1022 int r = -ENODEV; 1023 1024 if (*ppos) 1025 return 0; 1026 1027 s = kmalloc(sizeof(*s), GFP_KERNEL); 1028 1029 if (!s) 1030 return -ENOMEM; 1031 1032 trace_seq_init(s); 1033 1034 mutex_lock(&event_mutex); 1035 file = event_file_data(filp); 1036 if (file) 1037 print_event_filter(file, s); 1038 mutex_unlock(&event_mutex); 1039 1040 if (file) 1041 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1042 1043 kfree(s); 1044 1045 return r; 1046 } 1047 1048 static ssize_t 1049 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1050 loff_t *ppos) 1051 { 1052 struct ftrace_event_file *file; 1053 char *buf; 1054 int err = -ENODEV; 1055 1056 if (cnt >= PAGE_SIZE) 1057 return -EINVAL; 1058 1059 buf = (char *)__get_free_page(GFP_TEMPORARY); 1060 if (!buf) 1061 return -ENOMEM; 1062 1063 if (copy_from_user(buf, ubuf, cnt)) { 1064 free_page((unsigned long) buf); 1065 return -EFAULT; 1066 } 1067 buf[cnt] = '\0'; 1068 1069 mutex_lock(&event_mutex); 1070 file = event_file_data(filp); 1071 if (file) 1072 err = apply_event_filter(file, buf); 1073 mutex_unlock(&event_mutex); 1074 1075 free_page((unsigned long) buf); 1076 if (err < 0) 1077 return err; 1078 1079 *ppos += cnt; 1080 1081 return cnt; 1082 } 1083 1084 static LIST_HEAD(event_subsystems); 1085 1086 static int subsystem_open(struct inode *inode, struct file *filp) 1087 { 1088 struct 
event_subsystem *system = NULL; 1089 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ 1090 struct trace_array *tr; 1091 int ret; 1092 1093 if (tracing_is_disabled()) 1094 return -ENODEV; 1095 1096 /* Make sure the system still exists */ 1097 mutex_lock(&trace_types_lock); 1098 mutex_lock(&event_mutex); 1099 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 1100 list_for_each_entry(dir, &tr->systems, list) { 1101 if (dir == inode->i_private) { 1102 /* Don't open systems with no events */ 1103 if (dir->nr_events) { 1104 __get_system_dir(dir); 1105 system = dir->subsystem; 1106 } 1107 goto exit_loop; 1108 } 1109 } 1110 } 1111 exit_loop: 1112 mutex_unlock(&event_mutex); 1113 mutex_unlock(&trace_types_lock); 1114 1115 if (!system) 1116 return -ENODEV; 1117 1118 /* Some versions of gcc think dir can be uninitialized here */ 1119 WARN_ON(!dir); 1120 1121 /* Still need to increment the ref count of the system */ 1122 if (trace_array_get(tr) < 0) { 1123 put_system(dir); 1124 return -ENODEV; 1125 } 1126 1127 ret = tracing_open_generic(inode, filp); 1128 if (ret < 0) { 1129 trace_array_put(tr); 1130 put_system(dir); 1131 } 1132 1133 return ret; 1134 } 1135 1136 static int system_tr_open(struct inode *inode, struct file *filp) 1137 { 1138 struct ftrace_subsystem_dir *dir; 1139 struct trace_array *tr = inode->i_private; 1140 int ret; 1141 1142 if (tracing_is_disabled()) 1143 return -ENODEV; 1144 1145 if (trace_array_get(tr) < 0) 1146 return -ENODEV; 1147 1148 /* Make a temporary dir that has no system but points to tr */ 1149 dir = kzalloc(sizeof(*dir), GFP_KERNEL); 1150 if (!dir) { 1151 trace_array_put(tr); 1152 return -ENOMEM; 1153 } 1154 1155 dir->tr = tr; 1156 1157 ret = tracing_open_generic(inode, filp); 1158 if (ret < 0) { 1159 trace_array_put(tr); 1160 kfree(dir); 1161 return ret; 1162 } 1163 1164 filp->private_data = dir; 1165 1166 return 0; 1167 } 1168 1169 static int subsystem_release(struct inode *inode, struct file *file) 1170 { 1171 struct ftrace_subsystem_dir *dir = file->private_data; 1172 1173 trace_array_put(dir->tr); 1174 1175 /* 1176 * If dir->subsystem is NULL, then this is a temporary 1177 * descriptor that was made for a trace_array to enable 1178 * all subsystems. 
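	 * It was allocated in system_tr_open() and only needs to be freed.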
1179 */ 1180 if (dir->subsystem) 1181 put_system(dir); 1182 else 1183 kfree(dir); 1184 1185 return 0; 1186 } 1187 1188 static ssize_t 1189 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1190 loff_t *ppos) 1191 { 1192 struct ftrace_subsystem_dir *dir = filp->private_data; 1193 struct event_subsystem *system = dir->subsystem; 1194 struct trace_seq *s; 1195 int r; 1196 1197 if (*ppos) 1198 return 0; 1199 1200 s = kmalloc(sizeof(*s), GFP_KERNEL); 1201 if (!s) 1202 return -ENOMEM; 1203 1204 trace_seq_init(s); 1205 1206 print_subsystem_event_filter(system, s); 1207 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1208 1209 kfree(s); 1210 1211 return r; 1212 } 1213 1214 static ssize_t 1215 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1216 loff_t *ppos) 1217 { 1218 struct ftrace_subsystem_dir *dir = filp->private_data; 1219 char *buf; 1220 int err; 1221 1222 if (cnt >= PAGE_SIZE) 1223 return -EINVAL; 1224 1225 buf = (char *)__get_free_page(GFP_TEMPORARY); 1226 if (!buf) 1227 return -ENOMEM; 1228 1229 if (copy_from_user(buf, ubuf, cnt)) { 1230 free_page((unsigned long) buf); 1231 return -EFAULT; 1232 } 1233 buf[cnt] = '\0'; 1234 1235 err = apply_subsystem_event_filter(dir, buf); 1236 free_page((unsigned long) buf); 1237 if (err < 0) 1238 return err; 1239 1240 *ppos += cnt; 1241 1242 return cnt; 1243 } 1244 1245 static ssize_t 1246 show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) 1247 { 1248 int (*func)(struct trace_seq *s) = filp->private_data; 1249 struct trace_seq *s; 1250 int r; 1251 1252 if (*ppos) 1253 return 0; 1254 1255 s = kmalloc(sizeof(*s), GFP_KERNEL); 1256 if (!s) 1257 return -ENOMEM; 1258 1259 trace_seq_init(s); 1260 1261 func(s); 1262 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1263 1264 kfree(s); 1265 1266 return r; 1267 } 1268 1269 static int ftrace_event_avail_open(struct inode *inode, struct file *file); 1270 static int ftrace_event_set_open(struct inode *inode, struct file *file); 1271 static int ftrace_event_release(struct inode *inode, struct file *file); 1272 1273 static const struct seq_operations show_event_seq_ops = { 1274 .start = t_start, 1275 .next = t_next, 1276 .show = t_show, 1277 .stop = t_stop, 1278 }; 1279 1280 static const struct seq_operations show_set_event_seq_ops = { 1281 .start = s_start, 1282 .next = s_next, 1283 .show = t_show, 1284 .stop = t_stop, 1285 }; 1286 1287 static const struct file_operations ftrace_avail_fops = { 1288 .open = ftrace_event_avail_open, 1289 .read = seq_read, 1290 .llseek = seq_lseek, 1291 .release = seq_release, 1292 }; 1293 1294 static const struct file_operations ftrace_set_event_fops = { 1295 .open = ftrace_event_set_open, 1296 .read = seq_read, 1297 .write = ftrace_event_write, 1298 .llseek = seq_lseek, 1299 .release = ftrace_event_release, 1300 }; 1301 1302 static const struct file_operations ftrace_enable_fops = { 1303 .open = tracing_open_generic, 1304 .read = event_enable_read, 1305 .write = event_enable_write, 1306 .llseek = default_llseek, 1307 }; 1308 1309 static const struct file_operations ftrace_event_format_fops = { 1310 .open = trace_format_open, 1311 .read = seq_read, 1312 .llseek = seq_lseek, 1313 .release = seq_release, 1314 }; 1315 1316 static const struct file_operations ftrace_event_id_fops = { 1317 .read = event_id_read, 1318 .llseek = default_llseek, 1319 }; 1320 1321 static const struct file_operations ftrace_event_filter_fops = { 1322 .open = tracing_open_generic, 1323 .read = 
event_filter_read, 1324 .write = event_filter_write, 1325 .llseek = default_llseek, 1326 }; 1327 1328 static const struct file_operations ftrace_subsystem_filter_fops = { 1329 .open = subsystem_open, 1330 .read = subsystem_filter_read, 1331 .write = subsystem_filter_write, 1332 .llseek = default_llseek, 1333 .release = subsystem_release, 1334 }; 1335 1336 static const struct file_operations ftrace_system_enable_fops = { 1337 .open = subsystem_open, 1338 .read = system_enable_read, 1339 .write = system_enable_write, 1340 .llseek = default_llseek, 1341 .release = subsystem_release, 1342 }; 1343 1344 static const struct file_operations ftrace_tr_enable_fops = { 1345 .open = system_tr_open, 1346 .read = system_enable_read, 1347 .write = system_enable_write, 1348 .llseek = default_llseek, 1349 .release = subsystem_release, 1350 }; 1351 1352 static const struct file_operations ftrace_show_header_fops = { 1353 .open = tracing_open_generic, 1354 .read = show_header, 1355 .llseek = default_llseek, 1356 }; 1357 1358 static int 1359 ftrace_event_open(struct inode *inode, struct file *file, 1360 const struct seq_operations *seq_ops) 1361 { 1362 struct seq_file *m; 1363 int ret; 1364 1365 ret = seq_open(file, seq_ops); 1366 if (ret < 0) 1367 return ret; 1368 m = file->private_data; 1369 /* copy tr over to seq ops */ 1370 m->private = inode->i_private; 1371 1372 return ret; 1373 } 1374 1375 static int ftrace_event_release(struct inode *inode, struct file *file) 1376 { 1377 struct trace_array *tr = inode->i_private; 1378 1379 trace_array_put(tr); 1380 1381 return seq_release(inode, file); 1382 } 1383 1384 static int 1385 ftrace_event_avail_open(struct inode *inode, struct file *file) 1386 { 1387 const struct seq_operations *seq_ops = &show_event_seq_ops; 1388 1389 return ftrace_event_open(inode, file, seq_ops); 1390 } 1391 1392 static int 1393 ftrace_event_set_open(struct inode *inode, struct file *file) 1394 { 1395 const struct seq_operations *seq_ops = &show_set_event_seq_ops; 1396 struct trace_array *tr = inode->i_private; 1397 int ret; 1398 1399 if (trace_array_get(tr) < 0) 1400 return -ENODEV; 1401 1402 if ((file->f_mode & FMODE_WRITE) && 1403 (file->f_flags & O_TRUNC)) 1404 ftrace_clear_events(tr); 1405 1406 ret = ftrace_event_open(inode, file, seq_ops); 1407 if (ret < 0) 1408 trace_array_put(tr); 1409 return ret; 1410 } 1411 1412 static struct event_subsystem * 1413 create_new_subsystem(const char *name) 1414 { 1415 struct event_subsystem *system; 1416 1417 /* need to create new entry */ 1418 system = kmalloc(sizeof(*system), GFP_KERNEL); 1419 if (!system) 1420 return NULL; 1421 1422 system->ref_count = 1; 1423 1424 /* Only allocate if dynamic (kprobes and modules) */ 1425 if (!core_kernel_data((unsigned long)name)) { 1426 system->ref_count |= SYSTEM_FL_FREE_NAME; 1427 system->name = kstrdup(name, GFP_KERNEL); 1428 if (!system->name) 1429 goto out_free; 1430 } else 1431 system->name = name; 1432 1433 system->filter = NULL; 1434 1435 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL); 1436 if (!system->filter) 1437 goto out_free; 1438 1439 list_add(&system->list, &event_subsystems); 1440 1441 return system; 1442 1443 out_free: 1444 if (system->ref_count & SYSTEM_FL_FREE_NAME) 1445 kfree(system->name); 1446 kfree(system); 1447 return NULL; 1448 } 1449 1450 static struct dentry * 1451 event_subsystem_dir(struct trace_array *tr, const char *name, 1452 struct ftrace_event_file *file, struct dentry *parent) 1453 { 1454 struct ftrace_subsystem_dir *dir; 1455 struct event_subsystem *system; 
1456 struct dentry *entry; 1457 1458 /* First see if we did not already create this dir */ 1459 list_for_each_entry(dir, &tr->systems, list) { 1460 system = dir->subsystem; 1461 if (strcmp(system->name, name) == 0) { 1462 dir->nr_events++; 1463 file->system = dir; 1464 return dir->entry; 1465 } 1466 } 1467 1468 /* Now see if the system itself exists. */ 1469 list_for_each_entry(system, &event_subsystems, list) { 1470 if (strcmp(system->name, name) == 0) 1471 break; 1472 } 1473 /* Reset system variable when not found */ 1474 if (&system->list == &event_subsystems) 1475 system = NULL; 1476 1477 dir = kmalloc(sizeof(*dir), GFP_KERNEL); 1478 if (!dir) 1479 goto out_fail; 1480 1481 if (!system) { 1482 system = create_new_subsystem(name); 1483 if (!system) 1484 goto out_free; 1485 } else 1486 __get_system(system); 1487 1488 dir->entry = debugfs_create_dir(name, parent); 1489 if (!dir->entry) { 1490 pr_warning("Failed to create system directory %s\n", name); 1491 __put_system(system); 1492 goto out_free; 1493 } 1494 1495 dir->tr = tr; 1496 dir->ref_count = 1; 1497 dir->nr_events = 1; 1498 dir->subsystem = system; 1499 file->system = dir; 1500 1501 entry = debugfs_create_file("filter", 0644, dir->entry, dir, 1502 &ftrace_subsystem_filter_fops); 1503 if (!entry) { 1504 kfree(system->filter); 1505 system->filter = NULL; 1506 pr_warning("Could not create debugfs '%s/filter' entry\n", name); 1507 } 1508 1509 trace_create_file("enable", 0644, dir->entry, dir, 1510 &ftrace_system_enable_fops); 1511 1512 list_add(&dir->list, &tr->systems); 1513 1514 return dir->entry; 1515 1516 out_free: 1517 kfree(dir); 1518 out_fail: 1519 /* Only print this message if failed on memory allocation */ 1520 if (!dir || !system) 1521 pr_warning("No memory to create event subsystem %s\n", 1522 name); 1523 return NULL; 1524 } 1525 1526 static int 1527 event_create_dir(struct dentry *parent, struct ftrace_event_file *file) 1528 { 1529 struct ftrace_event_call *call = file->event_call; 1530 struct trace_array *tr = file->tr; 1531 struct list_head *head; 1532 struct dentry *d_events; 1533 const char *name; 1534 int ret; 1535 1536 /* 1537 * If the trace point header did not define TRACE_SYSTEM 1538 * then the system would be called "TRACE_SYSTEM". 1539 */ 1540 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) { 1541 d_events = event_subsystem_dir(tr, call->class->system, file, parent); 1542 if (!d_events) 1543 return -ENOMEM; 1544 } else 1545 d_events = parent; 1546 1547 name = ftrace_event_name(call); 1548 file->dir = debugfs_create_dir(name, d_events); 1549 if (!file->dir) { 1550 pr_warning("Could not create debugfs '%s' directory\n", 1551 name); 1552 return -1; 1553 } 1554 1555 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1556 trace_create_file("enable", 0644, file->dir, file, 1557 &ftrace_enable_fops); 1558 1559 #ifdef CONFIG_PERF_EVENTS 1560 if (call->event.type && call->class->reg) 1561 trace_create_file("id", 0444, file->dir, 1562 (void *)(long)call->event.type, 1563 &ftrace_event_id_fops); 1564 #endif 1565 1566 /* 1567 * Other events may have the same class. Only update 1568 * the fields if they are not already defined. 
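	 * (e.g. several DEFINE_EVENT()s sharing one DECLARE_EVENT_CLASS()).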
1569 */ 1570 head = trace_get_fields(call); 1571 if (list_empty(head)) { 1572 ret = call->class->define_fields(call); 1573 if (ret < 0) { 1574 pr_warning("Could not initialize trace point" 1575 " events/%s\n", name); 1576 return -1; 1577 } 1578 } 1579 trace_create_file("filter", 0644, file->dir, file, 1580 &ftrace_event_filter_fops); 1581 1582 trace_create_file("trigger", 0644, file->dir, file, 1583 &event_trigger_fops); 1584 1585 trace_create_file("format", 0444, file->dir, call, 1586 &ftrace_event_format_fops); 1587 1588 return 0; 1589 } 1590 1591 static void remove_event_from_tracers(struct ftrace_event_call *call) 1592 { 1593 struct ftrace_event_file *file; 1594 struct trace_array *tr; 1595 1596 do_for_each_event_file_safe(tr, file) { 1597 if (file->event_call != call) 1598 continue; 1599 1600 remove_event_file_dir(file); 1601 /* 1602 * The do_for_each_event_file_safe() is 1603 * a double loop. After finding the call for this 1604 * trace_array, we use break to jump to the next 1605 * trace_array. 1606 */ 1607 break; 1608 } while_for_each_event_file(); 1609 } 1610 1611 static void event_remove(struct ftrace_event_call *call) 1612 { 1613 struct trace_array *tr; 1614 struct ftrace_event_file *file; 1615 1616 do_for_each_event_file(tr, file) { 1617 if (file->event_call != call) 1618 continue; 1619 ftrace_event_enable_disable(file, 0); 1620 destroy_preds(file); 1621 /* 1622 * The do_for_each_event_file() is 1623 * a double loop. After finding the call for this 1624 * trace_array, we use break to jump to the next 1625 * trace_array. 1626 */ 1627 break; 1628 } while_for_each_event_file(); 1629 1630 if (call->event.funcs) 1631 __unregister_ftrace_event(&call->event); 1632 remove_event_from_tracers(call); 1633 list_del(&call->list); 1634 } 1635 1636 static int event_init(struct ftrace_event_call *call) 1637 { 1638 int ret = 0; 1639 const char *name; 1640 1641 name = ftrace_event_name(call); 1642 if (WARN_ON(!name)) 1643 return -EINVAL; 1644 1645 if (call->class->raw_init) { 1646 ret = call->class->raw_init(call); 1647 if (ret < 0 && ret != -ENOSYS) 1648 pr_warn("Could not initialize trace events/%s\n", 1649 name); 1650 } 1651 1652 return ret; 1653 } 1654 1655 static int 1656 __register_event(struct ftrace_event_call *call, struct module *mod) 1657 { 1658 int ret; 1659 1660 ret = event_init(call); 1661 if (ret < 0) 1662 return ret; 1663 1664 list_add(&call->list, &ftrace_events); 1665 call->mod = mod; 1666 1667 return 0; 1668 } 1669 1670 static struct ftrace_event_file * 1671 trace_create_new_event(struct ftrace_event_call *call, 1672 struct trace_array *tr) 1673 { 1674 struct ftrace_event_file *file; 1675 1676 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 1677 if (!file) 1678 return NULL; 1679 1680 file->event_call = call; 1681 file->tr = tr; 1682 atomic_set(&file->sm_ref, 0); 1683 atomic_set(&file->tm_ref, 0); 1684 INIT_LIST_HEAD(&file->triggers); 1685 list_add(&file->list, &tr->events); 1686 1687 return file; 1688 } 1689 1690 /* Add an event to a trace directory */ 1691 static int 1692 __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) 1693 { 1694 struct ftrace_event_file *file; 1695 1696 file = trace_create_new_event(call, tr); 1697 if (!file) 1698 return -ENOMEM; 1699 1700 return event_create_dir(tr->event_dir, file); 1701 } 1702 1703 /* 1704 * Just create a decriptor for early init. A descriptor is required 1705 * for enabling events at boot. We want to enable events before 1706 * the filesystem is initialized. 
1707 */ 1708 static __init int 1709 __trace_early_add_new_event(struct ftrace_event_call *call, 1710 struct trace_array *tr) 1711 { 1712 struct ftrace_event_file *file; 1713 1714 file = trace_create_new_event(call, tr); 1715 if (!file) 1716 return -ENOMEM; 1717 1718 return 0; 1719 } 1720 1721 struct ftrace_module_file_ops; 1722 static void __add_event_to_tracers(struct ftrace_event_call *call); 1723 1724 /* Add an additional event_call dynamically */ 1725 int trace_add_event_call(struct ftrace_event_call *call) 1726 { 1727 int ret; 1728 mutex_lock(&trace_types_lock); 1729 mutex_lock(&event_mutex); 1730 1731 ret = __register_event(call, NULL); 1732 if (ret >= 0) 1733 __add_event_to_tracers(call); 1734 1735 mutex_unlock(&event_mutex); 1736 mutex_unlock(&trace_types_lock); 1737 return ret; 1738 } 1739 1740 /* 1741 * Must be called under locking of trace_types_lock, event_mutex and 1742 * trace_event_sem. 1743 */ 1744 static void __trace_remove_event_call(struct ftrace_event_call *call) 1745 { 1746 event_remove(call); 1747 trace_destroy_fields(call); 1748 destroy_call_preds(call); 1749 } 1750 1751 static int probe_remove_event_call(struct ftrace_event_call *call) 1752 { 1753 struct trace_array *tr; 1754 struct ftrace_event_file *file; 1755 1756 #ifdef CONFIG_PERF_EVENTS 1757 if (call->perf_refcount) 1758 return -EBUSY; 1759 #endif 1760 do_for_each_event_file(tr, file) { 1761 if (file->event_call != call) 1762 continue; 1763 /* 1764 * We can't rely on ftrace_event_enable_disable(enable => 0) 1765 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress 1766 * TRACE_REG_UNREGISTER. 1767 */ 1768 if (file->flags & FTRACE_EVENT_FL_ENABLED) 1769 return -EBUSY; 1770 /* 1771 * The do_for_each_event_file_safe() is 1772 * a double loop. After finding the call for this 1773 * trace_array, we use break to jump to the next 1774 * trace_array. 
1775 */ 1776 break; 1777 } while_for_each_event_file(); 1778 1779 __trace_remove_event_call(call); 1780 1781 return 0; 1782 } 1783 1784 /* Remove an event_call */ 1785 int trace_remove_event_call(struct ftrace_event_call *call) 1786 { 1787 int ret; 1788 1789 mutex_lock(&trace_types_lock); 1790 mutex_lock(&event_mutex); 1791 down_write(&trace_event_sem); 1792 ret = probe_remove_event_call(call); 1793 up_write(&trace_event_sem); 1794 mutex_unlock(&event_mutex); 1795 mutex_unlock(&trace_types_lock); 1796 1797 return ret; 1798 } 1799 1800 #define for_each_event(event, start, end) \ 1801 for (event = start; \ 1802 (unsigned long)event < (unsigned long)end; \ 1803 event++) 1804 1805 #ifdef CONFIG_MODULES 1806 1807 static void trace_module_add_events(struct module *mod) 1808 { 1809 struct ftrace_event_call **call, **start, **end; 1810 1811 if (!mod->num_trace_events) 1812 return; 1813 1814 /* Don't add infrastructure for mods without tracepoints */ 1815 if (trace_module_has_bad_taint(mod)) { 1816 pr_err("%s: module has bad taint, not creating trace events\n", 1817 mod->name); 1818 return; 1819 } 1820 1821 start = mod->trace_events; 1822 end = mod->trace_events + mod->num_trace_events; 1823 1824 for_each_event(call, start, end) { 1825 __register_event(*call, mod); 1826 __add_event_to_tracers(*call); 1827 } 1828 } 1829 1830 static void trace_module_remove_events(struct module *mod) 1831 { 1832 struct ftrace_event_call *call, *p; 1833 bool clear_trace = false; 1834 1835 down_write(&trace_event_sem); 1836 list_for_each_entry_safe(call, p, &ftrace_events, list) { 1837 if (call->mod == mod) { 1838 if (call->flags & TRACE_EVENT_FL_WAS_ENABLED) 1839 clear_trace = true; 1840 __trace_remove_event_call(call); 1841 } 1842 } 1843 up_write(&trace_event_sem); 1844 1845 /* 1846 * It is safest to reset the ring buffer if the module being unloaded 1847 * registered any events that were used. The only worry is if 1848 * a new module gets loaded, and takes on the same id as the events 1849 * of this module. When printing out the buffer, traced events left 1850 * over from this module may be passed to the new module events and 1851 * unexpected results may occur. 1852 */ 1853 if (clear_trace) 1854 tracing_reset_all_online_cpus(); 1855 } 1856 1857 static int trace_module_notify(struct notifier_block *self, 1858 unsigned long val, void *data) 1859 { 1860 struct module *mod = data; 1861 1862 mutex_lock(&trace_types_lock); 1863 mutex_lock(&event_mutex); 1864 switch (val) { 1865 case MODULE_STATE_COMING: 1866 trace_module_add_events(mod); 1867 break; 1868 case MODULE_STATE_GOING: 1869 trace_module_remove_events(mod); 1870 break; 1871 } 1872 mutex_unlock(&event_mutex); 1873 mutex_unlock(&trace_types_lock); 1874 1875 return 0; 1876 } 1877 1878 static struct notifier_block trace_module_nb = { 1879 .notifier_call = trace_module_notify, 1880 .priority = 0, 1881 }; 1882 #endif /* CONFIG_MODULES */ 1883 1884 /* Create a new event directory structure for a trace directory. 
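 * This is called when a new trace instance registers its events; callers
 * hold trace_event_sem for writing (see event_trace_add_tracer()).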
*/ 1885 static void 1886 __trace_add_event_dirs(struct trace_array *tr) 1887 { 1888 struct ftrace_event_call *call; 1889 int ret; 1890 1891 list_for_each_entry(call, &ftrace_events, list) { 1892 ret = __trace_add_new_event(call, tr); 1893 if (ret < 0) 1894 pr_warning("Could not create directory for event %s\n", 1895 ftrace_event_name(call)); 1896 } 1897 } 1898 1899 struct ftrace_event_file * 1900 find_event_file(struct trace_array *tr, const char *system, const char *event) 1901 { 1902 struct ftrace_event_file *file; 1903 struct ftrace_event_call *call; 1904 const char *name; 1905 1906 list_for_each_entry(file, &tr->events, list) { 1907 1908 call = file->event_call; 1909 name = ftrace_event_name(call); 1910 1911 if (!name || !call->class || !call->class->reg) 1912 continue; 1913 1914 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) 1915 continue; 1916 1917 if (strcmp(event, name) == 0 && 1918 strcmp(system, call->class->system) == 0) 1919 return file; 1920 } 1921 return NULL; 1922 } 1923 1924 #ifdef CONFIG_DYNAMIC_FTRACE 1925 1926 /* Avoid typos */ 1927 #define ENABLE_EVENT_STR "enable_event" 1928 #define DISABLE_EVENT_STR "disable_event" 1929 1930 struct event_probe_data { 1931 struct ftrace_event_file *file; 1932 unsigned long count; 1933 int ref; 1934 bool enable; 1935 }; 1936 1937 static void 1938 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data) 1939 { 1940 struct event_probe_data **pdata = (struct event_probe_data **)_data; 1941 struct event_probe_data *data = *pdata; 1942 1943 if (!data) 1944 return; 1945 1946 if (data->enable) 1947 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 1948 else 1949 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 1950 } 1951 1952 static void 1953 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data) 1954 { 1955 struct event_probe_data **pdata = (struct event_probe_data **)_data; 1956 struct event_probe_data *data = *pdata; 1957 1958 if (!data) 1959 return; 1960 1961 if (!data->count) 1962 return; 1963 1964 /* Skip if the event is in a state we want to switch to */ 1965 if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 1966 return; 1967 1968 if (data->count != -1) 1969 (data->count)--; 1970 1971 event_enable_probe(ip, parent_ip, _data); 1972 } 1973 1974 static int 1975 event_enable_print(struct seq_file *m, unsigned long ip, 1976 struct ftrace_probe_ops *ops, void *_data) 1977 { 1978 struct event_probe_data *data = _data; 1979 1980 seq_printf(m, "%ps:", (void *)ip); 1981 1982 seq_printf(m, "%s:%s:%s", 1983 data->enable ? 
ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1984 data->file->event_call->class->system, 1985 ftrace_event_name(data->file->event_call)); 1986 1987 if (data->count == -1) 1988 seq_printf(m, ":unlimited\n"); 1989 else 1990 seq_printf(m, ":count=%ld\n", data->count); 1991 1992 return 0; 1993 } 1994 1995 static int 1996 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip, 1997 void **_data) 1998 { 1999 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2000 struct event_probe_data *data = *pdata; 2001 2002 data->ref++; 2003 return 0; 2004 } 2005 2006 static void 2007 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip, 2008 void **_data) 2009 { 2010 struct event_probe_data **pdata = (struct event_probe_data **)_data; 2011 struct event_probe_data *data = *pdata; 2012 2013 if (WARN_ON_ONCE(data->ref <= 0)) 2014 return; 2015 2016 data->ref--; 2017 if (!data->ref) { 2018 /* Remove the SOFT_MODE flag */ 2019 __ftrace_event_enable_disable(data->file, 0, 1); 2020 module_put(data->file->event_call->mod); 2021 kfree(data); 2022 } 2023 *pdata = NULL; 2024 } 2025 2026 static struct ftrace_probe_ops event_enable_probe_ops = { 2027 .func = event_enable_probe, 2028 .print = event_enable_print, 2029 .init = event_enable_init, 2030 .free = event_enable_free, 2031 }; 2032 2033 static struct ftrace_probe_ops event_enable_count_probe_ops = { 2034 .func = event_enable_count_probe, 2035 .print = event_enable_print, 2036 .init = event_enable_init, 2037 .free = event_enable_free, 2038 }; 2039 2040 static struct ftrace_probe_ops event_disable_probe_ops = { 2041 .func = event_enable_probe, 2042 .print = event_enable_print, 2043 .init = event_enable_init, 2044 .free = event_enable_free, 2045 }; 2046 2047 static struct ftrace_probe_ops event_disable_count_probe_ops = { 2048 .func = event_enable_count_probe, 2049 .print = event_enable_print, 2050 .init = event_enable_init, 2051 .free = event_enable_free, 2052 }; 2053 2054 static int 2055 event_enable_func(struct ftrace_hash *hash, 2056 char *glob, char *cmd, char *param, int enabled) 2057 { 2058 struct trace_array *tr = top_trace_array(); 2059 struct ftrace_event_file *file; 2060 struct ftrace_probe_ops *ops; 2061 struct event_probe_data *data; 2062 const char *system; 2063 const char *event; 2064 char *number; 2065 bool enable; 2066 int ret; 2067 2068 /* hash funcs only work with set_ftrace_filter */ 2069 if (!enabled || !param) 2070 return -EINVAL; 2071 2072 system = strsep(¶m, ":"); 2073 if (!param) 2074 return -EINVAL; 2075 2076 event = strsep(¶m, ":"); 2077 2078 mutex_lock(&event_mutex); 2079 2080 ret = -EINVAL; 2081 file = find_event_file(tr, system, event); 2082 if (!file) 2083 goto out; 2084 2085 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; 2086 2087 if (enable) 2088 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops; 2089 else 2090 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; 2091 2092 if (glob[0] == '!') { 2093 unregister_ftrace_function_probe_func(glob+1, ops); 2094 ret = 0; 2095 goto out; 2096 } 2097 2098 ret = -ENOMEM; 2099 data = kzalloc(sizeof(*data), GFP_KERNEL); 2100 if (!data) 2101 goto out; 2102 2103 data->enable = enable; 2104 data->count = -1; 2105 data->file = file; 2106 2107 if (!param) 2108 goto out_reg; 2109 2110 number = strsep(¶m, ":"); 2111 2112 ret = -EINVAL; 2113 if (!strlen(number)) 2114 goto out_free; 2115 2116 /* 2117 * We use the callback data field (which is a pointer) 2118 * as our counter. 
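	 *
	 * e.g. (illustrative) writing
	 *	schedule:enable_event:sched:sched_switch:2
	 * to set_ftrace_filter arrives here with "2" as the count string.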
2119 */ 2120 ret = kstrtoul(number, 0, &data->count); 2121 if (ret) 2122 goto out_free; 2123 2124 out_reg: 2125 /* Don't let event modules unload while probe registered */ 2126 ret = try_module_get(file->event_call->mod); 2127 if (!ret) { 2128 ret = -EBUSY; 2129 goto out_free; 2130 } 2131 2132 ret = __ftrace_event_enable_disable(file, 1, 1); 2133 if (ret < 0) 2134 goto out_put; 2135 ret = register_ftrace_function_probe(glob, ops, data); 2136 /* 2137 * The above returns on success the # of functions enabled, 2138 * but if it didn't find any functions it returns zero. 2139 * Consider no functions a failure too. 2140 */ 2141 if (!ret) { 2142 ret = -ENOENT; 2143 goto out_disable; 2144 } else if (ret < 0) 2145 goto out_disable; 2146 /* Just return zero, not the number of enabled functions */ 2147 ret = 0; 2148 out: 2149 mutex_unlock(&event_mutex); 2150 return ret; 2151 2152 out_disable: 2153 __ftrace_event_enable_disable(file, 0, 1); 2154 out_put: 2155 module_put(file->event_call->mod); 2156 out_free: 2157 kfree(data); 2158 goto out; 2159 } 2160 2161 static struct ftrace_func_command event_enable_cmd = { 2162 .name = ENABLE_EVENT_STR, 2163 .func = event_enable_func, 2164 }; 2165 2166 static struct ftrace_func_command event_disable_cmd = { 2167 .name = DISABLE_EVENT_STR, 2168 .func = event_enable_func, 2169 }; 2170 2171 static __init int register_event_cmds(void) 2172 { 2173 int ret; 2174 2175 ret = register_ftrace_command(&event_enable_cmd); 2176 if (WARN_ON(ret < 0)) 2177 return ret; 2178 ret = register_ftrace_command(&event_disable_cmd); 2179 if (WARN_ON(ret < 0)) 2180 unregister_ftrace_command(&event_enable_cmd); 2181 return ret; 2182 } 2183 #else 2184 static inline int register_event_cmds(void) { return 0; } 2185 #endif /* CONFIG_DYNAMIC_FTRACE */ 2186 2187 /* 2188 * The top level array has already had its ftrace_event_file 2189 * descriptors created in order to allow for early events to 2190 * be recorded. This function is called after the debugfs has been 2191 * initialized, and we now have to create the files associated 2192 * to the events. 2193 */ 2194 static __init void 2195 __trace_early_add_event_dirs(struct trace_array *tr) 2196 { 2197 struct ftrace_event_file *file; 2198 int ret; 2199 2200 2201 list_for_each_entry(file, &tr->events, list) { 2202 ret = event_create_dir(tr->event_dir, file); 2203 if (ret < 0) 2204 pr_warning("Could not create directory for event %s\n", 2205 ftrace_event_name(file->event_call)); 2206 } 2207 } 2208 2209 /* 2210 * For early boot up, the top trace array requires to have 2211 * a list of events that can be enabled. This must be done before 2212 * the filesystem is set up in order to allow events to be traced 2213 * early. 2214 */ 2215 static __init void 2216 __trace_early_add_events(struct trace_array *tr) 2217 { 2218 struct ftrace_event_call *call; 2219 int ret; 2220 2221 list_for_each_entry(call, &ftrace_events, list) { 2222 /* Early boot up should not have any modules loaded */ 2223 if (WARN_ON_ONCE(call->mod)) 2224 continue; 2225 2226 ret = __trace_early_add_new_event(call, tr); 2227 if (ret < 0) 2228 pr_warning("Could not create early event %s\n", 2229 ftrace_event_name(call)); 2230 } 2231 } 2232 2233 /* Remove the event directory structure for a trace directory. 
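 * This is the tear-down counterpart of __trace_add_event_dirs(); it is
 * used when a trace instance is removed (see event_trace_del_tracer()).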
/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * with the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;

	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   ftrace_event_name(file->event_call));
	}
}

/*
 * For early boot up, the top trace array must have a list of events
 * that can be enabled. This must be done before the filesystem is set
 * up in order to allow events to be traced early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   ftrace_event_name(call));
	}
}

/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}

static void __add_event_to_tracers(struct ftrace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
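
/*
 * Example boot command line use (illustrative; the event names are
 * assumptions about what is compiled into the kernel):
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The string uses the same syntax as set_event, is saved into
 * bootup_event_buf here, and is parsed (comma separated) by
 * event_trace_enable() below so the events start before debugfs
 * exists.
 */
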
2337 */ 2338 static __init int 2339 early_event_add_tracer(struct dentry *parent, struct trace_array *tr) 2340 { 2341 int ret; 2342 2343 mutex_lock(&event_mutex); 2344 2345 ret = create_event_toplevel_files(parent, tr); 2346 if (ret) 2347 goto out_unlock; 2348 2349 down_write(&trace_event_sem); 2350 __trace_early_add_event_dirs(tr); 2351 up_write(&trace_event_sem); 2352 2353 out_unlock: 2354 mutex_unlock(&event_mutex); 2355 2356 return ret; 2357 } 2358 2359 int event_trace_del_tracer(struct trace_array *tr) 2360 { 2361 mutex_lock(&event_mutex); 2362 2363 /* Disable any event triggers and associated soft-disabled events */ 2364 clear_event_triggers(tr); 2365 2366 /* Disable any running events */ 2367 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 2368 2369 /* Access to events are within rcu_read_lock_sched() */ 2370 synchronize_sched(); 2371 2372 down_write(&trace_event_sem); 2373 __trace_remove_event_dirs(tr); 2374 debugfs_remove_recursive(tr->event_dir); 2375 up_write(&trace_event_sem); 2376 2377 tr->event_dir = NULL; 2378 2379 mutex_unlock(&event_mutex); 2380 2381 return 0; 2382 } 2383 2384 static __init int event_trace_memsetup(void) 2385 { 2386 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); 2387 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC); 2388 return 0; 2389 } 2390 2391 static __init int event_trace_enable(void) 2392 { 2393 struct trace_array *tr = top_trace_array(); 2394 struct ftrace_event_call **iter, *call; 2395 char *buf = bootup_event_buf; 2396 char *token; 2397 int ret; 2398 2399 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { 2400 2401 call = *iter; 2402 ret = event_init(call); 2403 if (!ret) 2404 list_add(&call->list, &ftrace_events); 2405 } 2406 2407 /* 2408 * We need the top trace array to have a working set of trace 2409 * points at early init, before the debug files and directories 2410 * are created. Create the file entries now, and attach them 2411 * to the actual file dentries later. 
2412 */ 2413 __trace_early_add_events(tr); 2414 2415 while (true) { 2416 token = strsep(&buf, ","); 2417 2418 if (!token) 2419 break; 2420 if (!*token) 2421 continue; 2422 2423 ret = ftrace_set_clr_event(tr, token, 1); 2424 if (ret) 2425 pr_warn("Failed to enable trace event: %s\n", token); 2426 } 2427 2428 trace_printk_start_comm(); 2429 2430 register_event_cmds(); 2431 2432 register_trigger_cmds(); 2433 2434 return 0; 2435 } 2436 2437 static __init int event_trace_init(void) 2438 { 2439 struct trace_array *tr; 2440 struct dentry *d_tracer; 2441 struct dentry *entry; 2442 int ret; 2443 2444 tr = top_trace_array(); 2445 2446 d_tracer = tracing_init_dentry(); 2447 if (!d_tracer) 2448 return 0; 2449 2450 entry = debugfs_create_file("available_events", 0444, d_tracer, 2451 tr, &ftrace_avail_fops); 2452 if (!entry) 2453 pr_warning("Could not create debugfs " 2454 "'available_events' entry\n"); 2455 2456 if (trace_define_common_fields()) 2457 pr_warning("tracing: Failed to allocate common fields"); 2458 2459 ret = early_event_add_tracer(d_tracer, tr); 2460 if (ret) 2461 return ret; 2462 2463 #ifdef CONFIG_MODULES 2464 ret = register_module_notifier(&trace_module_nb); 2465 if (ret) 2466 pr_warning("Failed to register trace events module notifier\n"); 2467 #endif 2468 return 0; 2469 } 2470 early_initcall(event_trace_memsetup); 2471 core_initcall(event_trace_enable); 2472 fs_initcall(event_trace_init); 2473 2474 #ifdef CONFIG_FTRACE_STARTUP_TEST 2475 2476 static DEFINE_SPINLOCK(test_spinlock); 2477 static DEFINE_SPINLOCK(test_spinlock_irq); 2478 static DEFINE_MUTEX(test_mutex); 2479 2480 static __init void test_work(struct work_struct *dummy) 2481 { 2482 spin_lock(&test_spinlock); 2483 spin_lock_irq(&test_spinlock_irq); 2484 udelay(1); 2485 spin_unlock_irq(&test_spinlock_irq); 2486 spin_unlock(&test_spinlock); 2487 2488 mutex_lock(&test_mutex); 2489 msleep(1); 2490 mutex_unlock(&test_mutex); 2491 } 2492 2493 static __init int event_test_thread(void *unused) 2494 { 2495 void *test_malloc; 2496 2497 test_malloc = kmalloc(1234, GFP_KERNEL); 2498 if (!test_malloc) 2499 pr_info("failed to kmalloc\n"); 2500 2501 schedule_on_each_cpu(test_work); 2502 2503 kfree(test_malloc); 2504 2505 set_current_state(TASK_INTERRUPTIBLE); 2506 while (!kthread_should_stop()) 2507 schedule(); 2508 2509 return 0; 2510 } 2511 2512 /* 2513 * Do various things that may trigger events. 2514 */ 2515 static __init void event_test_stuff(void) 2516 { 2517 struct task_struct *test_thread; 2518 2519 test_thread = kthread_run(event_test_thread, NULL, "test-events"); 2520 msleep(1); 2521 kthread_stop(test_thread); 2522 } 2523 2524 /* 2525 * For every trace event defined, we will test each trace point separately, 2526 * and then by groups, and finally all trace points. 2527 */ 2528 static __init void event_trace_self_tests(void) 2529 { 2530 struct ftrace_subsystem_dir *dir; 2531 struct ftrace_event_file *file; 2532 struct ftrace_event_call *call; 2533 struct event_subsystem *system; 2534 struct trace_array *tr; 2535 int ret; 2536 2537 tr = top_trace_array(); 2538 2539 pr_info("Running tests on trace events:\n"); 2540 2541 list_for_each_entry(file, &tr->events, list) { 2542 2543 call = file->event_call; 2544 2545 /* Only test those that have a probe */ 2546 if (!call->class || !call->class->probe) 2547 continue; 2548 2549 /* 2550 * Testing syscall events here is pretty useless, but 2551 * we still do it if configured. But this is time consuming. 2552 * What we really need is a user thread to perform the 2553 * syscalls as we test. 
2554 */ 2555 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 2556 if (call->class->system && 2557 strcmp(call->class->system, "syscalls") == 0) 2558 continue; 2559 #endif 2560 2561 pr_info("Testing event %s: ", ftrace_event_name(call)); 2562 2563 /* 2564 * If an event is already enabled, someone is using 2565 * it and the self test should not be on. 2566 */ 2567 if (file->flags & FTRACE_EVENT_FL_ENABLED) { 2568 pr_warning("Enabled event during self test!\n"); 2569 WARN_ON_ONCE(1); 2570 continue; 2571 } 2572 2573 ftrace_event_enable_disable(file, 1); 2574 event_test_stuff(); 2575 ftrace_event_enable_disable(file, 0); 2576 2577 pr_cont("OK\n"); 2578 } 2579 2580 /* Now test at the sub system level */ 2581 2582 pr_info("Running tests on trace event systems:\n"); 2583 2584 list_for_each_entry(dir, &tr->systems, list) { 2585 2586 system = dir->subsystem; 2587 2588 /* the ftrace system is special, skip it */ 2589 if (strcmp(system->name, "ftrace") == 0) 2590 continue; 2591 2592 pr_info("Testing event system %s: ", system->name); 2593 2594 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1); 2595 if (WARN_ON_ONCE(ret)) { 2596 pr_warning("error enabling system %s\n", 2597 system->name); 2598 continue; 2599 } 2600 2601 event_test_stuff(); 2602 2603 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0); 2604 if (WARN_ON_ONCE(ret)) { 2605 pr_warning("error disabling system %s\n", 2606 system->name); 2607 continue; 2608 } 2609 2610 pr_cont("OK\n"); 2611 } 2612 2613 /* Test with all events enabled */ 2614 2615 pr_info("Running tests on all trace events:\n"); 2616 pr_info("Testing all events: "); 2617 2618 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1); 2619 if (WARN_ON_ONCE(ret)) { 2620 pr_warning("error enabling all events\n"); 2621 return; 2622 } 2623 2624 event_test_stuff(); 2625 2626 /* reset sysname */ 2627 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0); 2628 if (WARN_ON_ONCE(ret)) { 2629 pr_warning("error disabling all events\n"); 2630 return; 2631 } 2632 2633 pr_cont("OK\n"); 2634 } 2635 2636 #ifdef CONFIG_FUNCTION_TRACER 2637 2638 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 2639 2640 static void 2641 function_test_events_call(unsigned long ip, unsigned long parent_ip, 2642 struct ftrace_ops *op, struct pt_regs *pt_regs) 2643 { 2644 struct ring_buffer_event *event; 2645 struct ring_buffer *buffer; 2646 struct ftrace_entry *entry; 2647 unsigned long flags; 2648 long disabled; 2649 int cpu; 2650 int pc; 2651 2652 pc = preempt_count(); 2653 preempt_disable_notrace(); 2654 cpu = raw_smp_processor_id(); 2655 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); 2656 2657 if (disabled != 1) 2658 goto out; 2659 2660 local_save_flags(flags); 2661 2662 event = trace_current_buffer_lock_reserve(&buffer, 2663 TRACE_FN, sizeof(*entry), 2664 flags, pc); 2665 if (!event) 2666 goto out; 2667 entry = ring_buffer_event_data(event); 2668 entry->ip = ip; 2669 entry->parent_ip = parent_ip; 2670 2671 trace_buffer_unlock_commit(buffer, event, flags, pc); 2672 2673 out: 2674 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); 2675 preempt_enable_notrace(); 2676 } 2677 2678 static struct ftrace_ops trace_ops __initdata = 2679 { 2680 .func = function_test_events_call, 2681 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 2682 }; 2683 2684 static __init void event_trace_self_test_with_function(void) 2685 { 2686 int ret; 2687 ret = register_ftrace_function(&trace_ops); 2688 if (WARN_ON(ret < 0)) { 2689 pr_info("Failed to enable function tracer for event tests\n"); 2690 

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static __init void event_trace_self_test_with_function(void)
{
	int ret;

	ret = register_ftrace_function(&trace_ops);
	if (WARN_ON(ret < 0)) {
		pr_info("Failed to enable function tracer for event tests\n");
		return;
	}
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif