// SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_trigger - trace event triggers
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/security.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

/* All registered event_commands, protected by trigger_cmd_mutex */
static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

/*
 * Free one trigger's data: drop its filter first (via the command's
 * set_filter(NULL)), wait for in-flight tracepoint handlers to finish
 * so none can still reference @data, then free it.
 */
void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	/* make sure current triggers exit before free */
	tracepoint_synchronize_unregister();

	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @buffer: The trace buffer the event was written to
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event for the event record
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			/* No record: invoke unconditionally */
			data->ops->func(data, buffer, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			/* Defer until after the event is committed */
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, buffer, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);

/* Sentinel seq_file iterator value: show the available-triggers banner */
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

/* seq_file ->next: advance through the event's trigger list */
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS) {
		(*pos)++;
		return NULL;
	}
	return seq_list_next(t, &event_file->triggers, pos);
}

/*
 * seq_file ->start: take event_mutex for the whole read; if the event
 * has no triggers, hand back the sentinel so ->show() prints the list
 * of available trigger commands instead.
 */
static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

/* seq_file ->stop: drop the mutex taken in trigger_start() */
static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

/*
 * seq_file ->show: print either the available-triggers banner (for the
 * sentinel) or one registered trigger via its ops->print() method.
 */
static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		/* Reversed so the list prints in registration order */
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

/*
 * Open the per-event 'trigger' file.  Opening with O_TRUNC removes all
 * existing triggers (via each command's unreg_all); opening for read
 * sets up the seq_file iterator above.
 */
static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Parse one trigger command line of the form '[!]cmd[:n] [if filter]':
 * split off the leading command token (a leading '!' means removal and
 * is skipped for the name lookup) and dispatch to the matching
 * registered event_command's func() with the remainder as parameters.
 */
int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next;
	struct event_command *p;
	int ret = -EINVAL;

	next = buff = skip_spaces(buff);
	command = strsep(&next, ": \t");
	if (next) {
		next = skip_spaces(next);
		if (!*next)
			next = NULL;
	}
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/* Copy a user-supplied trigger string and feed it to trigger_process_regex() */
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

/* Tear down the seq_file iterator set up at open time (read mode only) */
static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	/* Checks for tracefs lockdown */
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;	/* name already taken */
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	/* @data carries the remaining trigger count, cast to a pointer */
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.  Takes a
 * reference on @data; released by event_trigger_free().
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.  Drops a
 * reference on @data and frees it when the last reference goes away.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

/*
 * Refcounted enable/disable of an event's TRIGGER_MODE: only the first
 * enable and the last disable actually toggle the flag bit and the
 * event's soft enable state.
 */
int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.  Only one
 * trigger of a given trigger_type may be registered per event.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 or 1) on success,
 * errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		/* Roll back the registration on failure */
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.  Matches by
 * trigger_type, the same key register_trigger() enforces uniqueness on.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	list_for_each_entry(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0])) {
		trigger = strsep(&param, " \t");
		if (param) {
			param = skip_spaces(param);
			if (!*param)
				param = NULL;
		}
	}

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;	/* -1 == unlimited invocations */
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		/* '!cmd' removes an existing trigger instead of adding one */
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Up the trigger_data count to make sure reg doesn't free it on failure */
	event_trigger_init(trigger_ops, trigger_data);
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
		ret = -ENOENT;
	} else if (ret > 0)
		ret = 0;

	/* Down the counter of trigger_data or free it if not used anymore */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	/* The filter clause must start with the 'if' keyword */
	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->tr, file->event_call,
				  filter_str, false, &filter);
	/*
	 * If create_event_filter() fails, filter still needs to be freed.
	 * Which the calling code will do with data->filter.
	 */
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		tracepoint_synchronize_unregister();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

/* All named triggers, linked by event_trigger_data::named_list */
static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		/* Only the owner (named_data == NULL) holds the common data */
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

/* Pause or unpause every named trigger sharing @data's name */
static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				/* Remember prior state so unpause restores it */
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data to point @data at
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}

/* 'traceon' trigger: turn tracing on (no-op if already on) */
static void
traceon_trigger(struct event_trigger_data *data,
		struct trace_buffer *buffer, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

/* 'traceon:n' trigger: as above, at most data->count times (-1 = unlimited) */
static void
traceon_count_trigger(struct event_trigger_data *data,
		      struct trace_buffer *buffer, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

/* 'traceoff' trigger: turn tracing off (no-op if already off) */
static void
traceoff_trigger(struct event_trigger_data *data,
		 struct trace_buffer *buffer, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/* 'traceoff:n' trigger: as above, at most data->count times (-1 = unlimited) */
static void
traceoff_count_trigger(struct event_trigger_data *data,
		       struct trace_buffer *buffer, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func			= traceon_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func			= traceon_count_trigger,
	.print			= traceon_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func			= traceoff_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func			= traceoff_count_trigger,
	.print			= traceoff_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name			= "traceon",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name			= "traceoff",
	.trigger_type		= ETT_TRACE_ONOFF,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
/* 'snapshot' trigger: snapshot the event's trace instance (or global) */
static void
snapshot_trigger(struct event_trigger_data *data,
		 struct trace_buffer *buffer, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

/* 'snapshot:n' trigger: as above, at most data->count times (-1 = unlimited) */
static void
snapshot_count_trigger(struct event_trigger_data *data,
		       struct trace_buffer *buffer, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, buffer, rec, event);
}

/* Allocate the snapshot buffer before registering the trigger */
static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	if (tracing_alloc_snapshot_instance(file->tr) != 0)
		return 0;

	return register_trigger(glob, ops, data, file);
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

/* Unlimited "snapshot" trigger ops: fires on every matching event. */
static struct event_trigger_ops snapshot_trigger_ops = {
	.func			= snapshot_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Counted "snapshot" trigger ops: used when a ":count" was supplied. */
static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func			= snapshot_count_trigger,
	.print			= snapshot_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Pick counted vs unlimited ops depending on whether a param was given. */
static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name			= "snapshot",
	.trigger_type		= ETT_SNAPSHOT,
	.func			= event_trigger_callback,
	.reg			= register_snapshot_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= snapshot_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Register the "snapshot" trigger command at boot; WARNs on failure. */
static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
/* No snapshot support configured: registration is a no-op. */
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

/* Dump the current stack trace, skipping the trigger-plumbing frames above. */
static void
stacktrace_trigger(struct event_trigger_data *data,
		   struct trace_buffer *buffer, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

/* As stacktrace_trigger(), but limited to a user-supplied count (-1 = unlimited). */
static void
stacktrace_count_trigger(struct event_trigger_data *data,
			 struct trace_buffer *buffer, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, buffer, rec, event);
}

/* Show a registered "stacktrace" trigger (name, count and filter). */
static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

/* Unlimited "stacktrace" trigger ops. */
static struct event_trigger_ops stacktrace_trigger_ops = {
	.func			= stacktrace_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Counted "stacktrace" trigger ops: used when a ":count" was supplied. */
static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func			= stacktrace_count_trigger,
	.print			= stacktrace_trigger_print,
	.init			= event_trigger_init,
	.free			= event_trigger_free,
};

/* Pick counted vs unlimited ops depending on whether a param was given. */
static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ?
		&stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

/*
 * "stacktrace" trigger command.  Runs as a POST_TRIGGER so the stack is
 * dumped after the triggering event has been written.
 */
static struct event_command trigger_stacktrace_cmd = {
	.name			= "stacktrace",
	.trigger_type		= ETT_STACKTRACE,
	.flags			= EVENT_CMD_FL_POST_TRIGGER,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= stacktrace_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Register the "stacktrace" trigger command at boot; WARNs on failure. */
static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
/* No stacktrace support configured: registration is a no-op. */
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

/* Boot-time error unwind for register_trigger_traceon_traceoff_cmds(). */
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

/*
 * Soft-enable or soft-disable the target event: flip its SOFT_DISABLED
 * bit according to enable_data->enable.
 */
static void
event_enable_trigger(struct event_trigger_data *data,
		     struct trace_buffer *buffer, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

/*
 * Counted variant: only consume the count when the target event is not
 * already in the desired state, then defer to event_enable_trigger().
 */
static void
event_enable_count_trigger(struct event_trigger_data *data,
			   struct trace_buffer *buffer, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, buffer, rec, event);
}

/*
 * Show an enable/disable_event trigger in the form:
 *   <cmd>:<system>:<event>[:count|:unlimited][ if <filter>]
 */
int
event_enable_trigger_print(struct seq_file *m,
			   struct event_trigger_ops *ops,
			   struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/*
 * Drop a reference to an enable/disable_event trigger.  On the last ref,
 * clear SOFT_MODE on the target event, release the module reference taken
 * at registration, and free both the trigger data and the enable data.
 */
void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

/* Unlimited enable_event trigger ops. */
static struct event_trigger_ops event_enable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* Counted enable_event trigger ops. */
static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/* Unlimited disable_event trigger ops (same func; enable flag differs). */
static struct event_trigger_ops event_disable_trigger_ops = {
	.func			= event_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

static struct
event_trigger_ops event_disable_count_trigger_ops = {
	.func			= event_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};

/*
 * event_enable_trigger_func - parse and set up an enable/disable_event trigger
 * @cmd_ops: the command ops (enable_event / disable_event variants)
 * @file: the trace_event_file the trigger is attached to
 * @glob: the raw trigger string; a leading '!' means "remove this trigger"
 * @cmd: the command name (used to determine enable vs disable, hist vs event)
 * @param: "system:event[:count] [if filter]" naming the event to act on
 *
 * Returns 0 on success, a negative errno on failure.
 */
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;
	if (param) {
		param = skip_spaces(param);
		if (!*param)
			param = NULL;
	}

	/* Split "system:event[:count]" on ':'. */
	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	/* A leading '!' removes the existing trigger instead of adding one. */
	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	/* Up the trigger_data count to make sure nothing frees it on failure */
	event_trigger_init(trigger_ops, trigger_data);

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
	/* Drop the extra ref taken above; registration holds its own. */
	event_trigger_free(trigger_ops, trigger_data);
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	event_trigger_free(trigger_ops, trigger_data);
	kfree(enable_data);
	goto out;
}

/*
 * Add an enable/disable_event trigger to @file's trigger list, unless an
 * equivalent trigger (same trigger type, same target event file) already
 * exists.  Returns the number of triggers registered (1), 0 if enabling
 * the trigger on the file failed, or a negative errno.
 *
 * Caller must hold event_mutex.
 */
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Reject duplicates: same trigger type aimed at the same event file. */
	list_for_each_entry(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	/* Roll back the list insertion if the file can't take triggers. */
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

/*
 * Remove the enable/disable_event trigger matching @test (same trigger
 * type, same target event file) from @file's trigger list, if present.
 *
 * Caller must hold event_mutex.
 */
void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data
					*enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	lockdep_assert_held(&event_mutex);

	/* Find the matching trigger: same type, same target event file. */
	list_for_each_entry(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	/* data only points at a real trigger here if the loop broke out. */
	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/*
 * Select the trigger ops for enable_event/disable_event based on the
 * command name (enable vs disable) and on whether a count parameter
 * was supplied.
 */
static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

/* "enable_event" trigger command. */
static struct event_command trigger_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* "disable_event" trigger command. */
static struct event_command trigger_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.trigger_type		= ETT_EVENT_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.get_trigger_ops	= event_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/* Boot-time error unwind for register_trigger_enable_disable_cmds(). */
static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

/*
 * Register the enable_event/disable_event trigger commands at boot;
 * unwinds the first registration if the second fails.
 */
static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

/*
 * Register the traceon/traceoff trigger commands at boot; unwinds the
 * first registration if the second fails.
 */
static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

/* Boot-time entry point: register every built-in trigger command. */
__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}