/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and, if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);
/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 * @rec: The trace entry for the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt,
			 void *rec)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, rec);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
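/*
 * Illustrative only: a minimal sketch (not taken from this file) of how a
 * caller is expected to pair the two functions above.  event_triggers_call()
 * runs before the event is committed; any bits it returns identify
 * 'post_trigger' commands that must only run after the record has been
 * written, via event_triggers_post_call().  The names my_handler and
 * my_write_event below are hypothetical.
 *
 *	static void my_handler(struct trace_event_file *file, void *entry)
 *	{
 *		enum event_trigger_type tt;
 *
 *		tt = event_triggers_call(file, entry);
 *		my_write_event(file, entry);		// commit the record
 *		if (tt)
 *			event_triggers_post_call(file, tt, entry);
 *	}
 */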
#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
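/*
 * For orientation, an example of the user-visible syntax that ends up in
 * trigger_process_regex() (paths assume a typical tracefs mount; the event
 * and filter shown are arbitrary):
 *
 *	# attach a counted, filtered trigger to an event
 *	echo 'traceoff:3 if prev_pid == 1234' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *	# remove it again; a leading '!' selects the command's unreg path
 *	echo '!traceoff' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 *	# list the triggers currently attached (or the available commands)
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */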
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
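/*
 * For example (illustrative values), reading an event's trigger file when
 * it has an unlimited trigger and a counted, filtered one attached would
 * show lines formatted by the function above roughly like:
 *
 *	traceon:unlimited
 *	traceoff:count=3 if prev_pid == 1234
 */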
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}
/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
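/*
 * Worked example (hypothetical input) of how a string written to the
 * trigger file is split up by the time it reaches the function above.
 * For "traceoff:5 if pid == 1234":
 *
 *	glob  = "traceoff"	(buffer already NUL-terminated at the first
 *				 separator by trigger_process_regex(); a
 *				 removal would arrive as "!traceoff")
 *	cmd   = "traceoff"	(glob with any leading '!' skipped)
 *	param = "5 if pid == 1234"
 *				-> trigger = "5", so count becomes 5
 *				-> param = "if pid == 1234", handed to
 *				   ->set_filter()
 */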
/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}
static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data @data will reference
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
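/*
 * Illustrative only: a hypothetical event command that accepts a
 * ":name=foo" parameter would typically tie the helpers above together
 * roughly like this when registering a new trigger (error handling and
 * locking omitted; none of the variable names below exist in this file):
 *
 *	named_data = find_named_trigger(name);
 *	if (named_data)
 *		// share the first registrant's common data
 *		set_named_trigger_data(data, named_data);
 *	save_named_trigger(name, data);	// add to the named_triggers list
 *	...
 *	pause_named_trigger(data);	// pauses every trigger with that name
 */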
static void
traceon_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
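/*
 * Example usage of the snapshot trigger defined above (illustrative event
 * and path; register_snapshot_trigger() takes care of allocating the
 * snapshot buffer the first time such a trigger is set):
 *
 *	echo 'snapshot:1 if prev_comm == "bash"' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	cat /sys/kernel/debug/tracing/snapshot
 */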
#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 * event_triggers_post_call()
 * trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */
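/*
 * Example usage of the stacktrace trigger defined above (illustrative event,
 * filter and path); the resulting kernel stack dumps appear in the trace
 * output alongside the triggering events:
 *
 *	echo 'stacktrace:5 if bytes_req > 4096' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 *	cat /sys/kernel/debug/tracing/trace
 */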
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable ==
	    !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}
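/*
 * Example of the enable_event/disable_event syntax parsed above,
 * "<system>:<event>[:count] [if filter]" (the events and path are
 * illustrative only).  Here one event soft-enables another for its next
 * ten occurrences, then the trigger is removed again:
 *
 *	echo 'enable_event:kmem:kmalloc:10' > \
 *		/sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
 *	echo '!enable_event:kmem:kmalloc' > \
 *		/sys/kernel/debug/tracing/events/syscalls/sys_enter_read/trigger
 */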
int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}