/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and, if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 * @rec: The trace entry for the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt,
			 void *rec)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, rec);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
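
/*
 * Illustrative sketch (not part of this file's code): a typical event
 * commit path calls event_triggers_call() before the event is written
 * and, if any bits came back, event_triggers_post_call() once the
 * write is done, roughly:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (trace_file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(trace_file, entry);
 *	...write/commit the event...
 *	if (tt)
 *		event_triggers_post_call(trace_file, tt, entry);
 */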

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
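
/*
 * Usage sketch (paths are illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug/tracing): writes to an event's 'trigger' file go
 * through event_trigger_regex_write() and are parsed by
 * trigger_process_regex() below; reads list the current triggers via
 * the seq_file ops above.
 *
 *   # echo 'traceoff:5 if common_pid == 1234' > \
 *		events/sched/sched_switch/trigger
 *   # cat events/sched/sched_switch/trigger
 *   traceoff:count=5 if common_pid == 1234
 *   # echo '!traceoff' > events/sched/sched_switch/trigger
 *
 * A leading '!' removes an existing trigger of the named type.
 */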

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
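
/*
 * Worked example (illustrative): for
 *
 *   echo 'traceoff:5 if common_pid == 1234' > .../trigger
 *
 * event_trigger_callback() is invoked with cmd = "traceoff" and
 * param = "5 if common_pid == 1234".  The leading "5" is split off as
 * the count (trigger_data->count = 5) and the remainder,
 * "if common_pid == 1234", is handed to cmd_ops->set_filter() to
 * become the trigger's filter.
 */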

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data of the named trigger being registered
 * @named_data: The common trigger data it should reference
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}
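
/*
 * Example (illustrative, assumes CONFIG_HIST_TRIGGERS): named triggers
 * are currently used by hist triggers, where giving two events a
 * trigger with the same name, e.g.
 *
 *   echo 'hist:name=foo:keys=common_pid' >> events/sched/sched_wakeup/trigger
 *   echo 'hist:name=foo:keys=common_pid' >> events/sched/sched_switch/trigger
 *
 * makes both events update the single shared histogram owned by the
 * first registered instance.
 */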

static void
traceon_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}
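
/*
 * Example (illustrative): the traceon/traceoff triggers switch the
 * global tracing ring buffer on or off when the event fires, which is
 * commonly used to freeze the trace at an interesting point:
 *
 *   # echo 'traceoff if bytes_req > 4096' > events/kmem/kmalloc/trigger
 *
 * With a count, e.g. 'traceon:3', the action fires at most three
 * times, and the remaining count shows up when reading the file.
 */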

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}
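
/*
 * Note/example (illustrative): registering a snapshot trigger also
 * allocates the snapshot buffer via tracing_alloc_snapshot(), so e.g.
 *
 *   # echo 'snapshot:1 if common_pid == 1234' > \
 *		events/sched/sched_switch/trigger
 *
 * both arms the trigger and ensures the snapshot buffer exists before
 * the first event can fire.
 */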

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
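
/*
 * Usage sketch (illustrative): the enable_event/disable_event triggers
 * soft-enable or soft-disable *another* event when this one fires,
 * using the 'system:event[:count]' form parsed below, e.g.
 *
 *   # echo 'enable_event:kmem:kmalloc:5' > \
 *		events/sched/sched_wakeup/trigger
 *
 * which soft-enables kmem:kmalloc when a wakeup fires, performing the
 * switch at most five times.
 */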

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}