/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter and if the record matches the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 * @rec: The trace entry for the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt,
			 void *rec)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, rec);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
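
/*
 * Illustrative sketch (not a verbatim caller): the event commit path is
 * expected to pair the two entry points above roughly like this, running
 * any deferred 'post' triggers only once the record has been written:
 *
 *	tt = event_triggers_call(file, entry);
 *	... write entry to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt, entry);
 */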

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}
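
/*
 * For reference, the strings parsed below come from writes to a per-event
 * 'trigger' file; a few illustrative interactions (paths and filters are
 * examples only):
 *
 *	echo 'traceoff:5 if prev_pid == 0' > events/sched/sched_switch/trigger
 *	echo '!traceoff' > events/sched/sched_switch/trigger
 *	cat events/sched/sched_switch/trigger
 *
 * A leading '!' removes an existing trigger of the named command.
 */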

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}
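
/*
 * Example 'trigger' file lines produced by the generic @print helper
 * below (illustrative values):
 *
 *	traceoff:count=5 if prev_pid == 0
 *	traceon:unlimited
 */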

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}
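
/*
 * Note (illustrative): tm_ref counts the triggers attached to an event,
 * so registering two triggers on the same event calls the function above
 * twice, but only the 0 -> 1 transition sets TRIGGER_MODE and soft-enables
 * the event; likewise only the final removal (1 -> 0) clears the flag and
 * disables the event again.
 */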

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered on success, errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}
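
/*
 * Note (illustrative): writing the same command twice, e.g. 'traceon'
 * followed by another 'traceon' on one event, makes the loop above find
 * an existing trigger of the same type and fail with -EEXIST.  On success
 * the function returns the number of triggers registered (1), which
 * callers such as event_trigger_callback() fold back into 0.
 */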

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns the number of triggers registered on
	 * success; zero means nothing was registered.  Consider
	 * registering nothing a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
 assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
 out:
	return ret;
}

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}
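
/*
 * Illustrative example (hist trigger syntax, assuming CONFIG_HIST_TRIGGERS):
 * two events can share one histogram by using the same name, in which case
 * the second registration references the data owned by the first:
 *
 *	echo 'hist:name=foo:keys=pid' > events/sched/sched_wakeup/trigger
 *	echo 'hist:name=foo:keys=pid' > events/sched/sched_switch/trigger
 */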

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}
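
/*
 * Illustrative note: the named trigger users (the hist triggers) pause
 * and resume a whole named set, e.g. via the ':pause' and ':cont'
 * modifiers, so every trigger sharing the name stops or resumes logging
 * together.
 */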

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger to be associated
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

static void
traceon_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
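
/*
 * Illustrative mapping done by the helper below: 'traceon' with no count
 * selects traceon_trigger_ops, while 'traceon:5' (param != NULL) selects
 * traceon_count_trigger_ops so the trigger fires at most five times.
 */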

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};
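
/*
 * Illustrative usage: 'echo snapshot:1 > events/.../trigger' takes one
 * snapshot when the event fires.  register_snapshot_trigger() above also
 * allocates the snapshot buffer lazily, backing the trigger out again if
 * tracing_alloc_snapshot() fails.
 */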

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   trace_event_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec);
}
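
/*
 * Illustrative usage of the enable_event/disable_event commands handled
 * below: the trigger names another <system>:<event> to soft-enable or
 * soft-disable when the triggering event fires, e.g.:
 *
 *	echo 'enable_event:kmem:kmalloc:5 if pid == 1' > \
 *			events/sched/sched_switch/trigger
 */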

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns the number of triggers registered on
	 * success; zero means nothing was registered.  Consider
	 * registering nothing a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
 out:
	return ret;

 out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
 out_put:
	module_put(event_enable_file->event_call->mod);
 out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
 out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}