/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

void trigger_data_free(struct event_trigger_data *data)
{
	if (data->cmd_ops->set_filter)
		data->cmd_ops->set_filter(NULL, data, NULL);

	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The trace_event_file associated with the event
 * @rec: The trace entry for the event, NULL for unconditional invocation
 * @event: The ring buffer event associated with @rec, if any; it is
 *	simply passed through to each trigger's func()
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.  If rec is
 * non-NULL, it means that the trigger requires further processing and
 * shouldn't be unconditionally invoked.  If rec is non-NULL and the
 * trigger has a filter associated with it, rec will be checked against
 * the filter; if the record matches, the trigger will be invoked.
 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
 * in any case until the current event is written, the trigger
 * function isn't invoked but the bit associated with the deferred
 * trigger is set in the return value.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 *
 * Return: an enum event_trigger_type value containing a set bit for
 * any trigger that should be deferred, ETT_NONE if nothing to defer.
 */
enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
		    struct ring_buffer_event *event)
{
	struct event_trigger_data *data;
	enum event_trigger_type tt = ETT_NONE;
	struct event_filter *filter;

	if (list_empty(&file->triggers))
		return tt;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (!rec) {
			data->ops->func(data, rec, event);
			continue;
		}
		filter = rcu_dereference_sched(data->filter);
		if (filter && !filter_match_preds(filter, rec))
			continue;
		if (event_command_post_trigger(data->cmd_ops)) {
			tt |= data->cmd_ops->trigger_type;
			continue;
		}
		data->ops->func(data, rec, event);
	}
	return tt;
}
EXPORT_SYMBOL_GPL(event_triggers_call);

/**
 * event_triggers_post_call - Call 'post_triggers' for a trace event
 * @file: The trace_event_file associated with the event
 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command, if the
 * corresponding bit is set in the tt enum passed into this function.
 * See @event_triggers_call for details on how those bits are set.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt)
{
	struct event_trigger_data *data;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->paused)
			continue;
		if (data->cmd_ops->trigger_type & tt)
			data->ops->func(data, NULL, NULL);
	}
}
EXPORT_SYMBOL_GPL(event_triggers_post_call);
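
/*
 * Typical caller pattern (roughly what trace_event_buffer_commit() and
 * the event_trigger_unlock_commit() helpers in trace.h do): call
 * event_triggers_call() before committing the event, and if any bits
 * come back set, call event_triggers_post_call() once the event has
 * been written:
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (trace_file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(trace_file, entry, event);
 *	... commit the event to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(trace_file, tt);
 */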

#define SHOW_AVAILABLE_TRIGGERS	(void *)(1UL)

static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct trace_event_file *event_file = event_file_data(m->private);

	if (t == SHOW_AVAILABLE_TRIGGERS)
		return NULL;

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	if (list_empty(&event_file->triggers))
		return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;
	struct event_command *p;

	if (v == SHOW_AVAILABLE_TRIGGERS) {
		seq_puts(m, "# Available triggers:\n");
		seq_putc(m, '#');
		mutex_lock(&trigger_cmd_mutex);
		list_for_each_entry_reverse(p, &trigger_commands, list)
			seq_printf(m, " %s", p->name);
		seq_putc(m, '\n');
		mutex_unlock(&trigger_cmd_mutex);
		return 0;
	}

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		struct trace_event_file *event_file;
		struct event_command *p;

		event_file = event_file_data(file);

		list_for_each_entry(p, &trigger_commands, list) {
			if (p->unreg_all)
				p->unreg_all(event_file);
		}
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct trace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct trace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		kfree(buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = tracing_lseek,
	.release = event_trigger_release,
};
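
/*
 * These fops back the per-event 'trigger' file in tracefs.  For
 * illustration (the trace events documentation is the authoritative
 * reference), triggers are typically added, listed and removed like:
 *
 *	# echo 'traceoff:5 if prev_pid == 0' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	# cat .../events/sched/sched_switch/trigger
 *	# echo '!traceoff' > .../events/sched/sched_switch/trigger
 *
 * Opening the file for writing with O_TRUNC (plain '>' redirection)
 * also clears existing triggers for any command that implements
 * ->unreg_all(), as done in event_trigger_regex_open() above.
 */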

/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
__init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
__init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_puts(m, name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}
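
/*
 * Illustrative output, as seen when reading an event's 'trigger' file
 * for triggers using the print implementation above:
 *
 *	traceoff:count=5 if prev_pid == 0
 *	snapshot:unlimited
 *
 * i.e. the command name, either ':unlimited' or the remaining count,
 * and the filter string (if any) appended after ' if '.
 */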

/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
int event_trigger_init(struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

int trace_event_trigger_enable_disable(struct trace_event_file *file,
				       int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data, *n;
		list_for_each_entry_safe(data, n, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			list_del_rcu(&data->list);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * update_cond_flag - Set or reset the TRIGGER_COND bit
 * @file: The trace_event_file associated with the event
 *
 * If an event has triggers and any of those triggers has a filter or
 * a post_trigger, trigger invocation needs to be deferred until after
 * the current event has logged its data, and the event should have
 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
 * cleared.
 */
void update_cond_flag(struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool set_cond = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->filter || event_command_post_trigger(data->cmd_ops) ||
		    event_command_needs_rec(data->cmd_ops)) {
			set_cond = true;
			break;
		}
	}

	if (set_cond)
		set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
	else
		clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered, errno otherwise (callers
 * treat 0 triggers registered as a failure)
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct trace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			struct event_trigger_data *test,
			struct trace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The trace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct trace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	trigger_data->private_data = file;
	INIT_LIST_HEAD(&trigger_data->list);
	INIT_LIST_HEAD(&trigger_data->named_list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
out:
	return ret;

out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	goto out;
}
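
/*
 * Illustration of how the pieces fit together for a write like
 * 'traceoff:5 if prev_pid == 0' (or '!traceoff' for removal):
 * trigger_process_regex() splits off the command name ("traceoff") and
 * hands the remainder to the command's func(), i.e. the callback
 * above, which peels off the optional ":5" count, treats anything that
 * is left as the 'if' filter for set_filter(), and finally calls reg()
 * to attach the trigger to the event's file->triggers list.
 */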

/**
 * set_trigger_filter - Generic event_command @set_filter implementation
 * @filter_str: The filter string for the trigger, NULL to remove filter
 * @trigger_data: Trigger-specific data
 * @file: The trace_event_file associated with the event
 *
 * Common implementation for event command filter parsing and filter
 * instantiation.
 *
 * Usually used directly as the @set_filter method in event command
 * implementations.
 *
 * Also used to remove a filter (if filter_str = NULL).
 *
 * Return: 0 on success, errno otherwise
 */
int set_trigger_filter(char *filter_str,
		       struct event_trigger_data *trigger_data,
		       struct trace_event_file *file)
{
	struct event_trigger_data *data = trigger_data;
	struct event_filter *filter = NULL, *tmp;
	int ret = -EINVAL;
	char *s;

	if (!filter_str) /* clear the current filter */
		goto assign;

	s = strsep(&filter_str, " \t");

	if (!strlen(s) || strcmp(s, "if") != 0)
		goto out;

	if (!filter_str)
		goto out;

	/* The filter is for the 'trigger' event, not the triggered event */
	ret = create_event_filter(file->event_call, filter_str, false, &filter);
	if (ret)
		goto out;
assign:
	tmp = rcu_access_pointer(data->filter);

	rcu_assign_pointer(data->filter, filter);

	if (tmp) {
		/* Make sure the call is done with the filter */
		synchronize_sched();
		free_event_filter(tmp);
	}

	kfree(data->filter_str);
	data->filter_str = NULL;

	if (filter_str) {
		data->filter_str = kstrdup(filter_str, GFP_KERNEL);
		if (!data->filter_str) {
			free_event_filter(rcu_access_pointer(data->filter));
			data->filter = NULL;
			ret = -ENOMEM;
		}
	}
out:
	return ret;
}

static LIST_HEAD(named_triggers);

/**
 * find_named_trigger - Find the common named trigger associated with @name
 * @name: The name of the set of named triggers to find the common data for
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * returns the common trigger data associated with that first
 * registered instance.
 *
 * Return: the common trigger data for the given named trigger on
 * success, NULL otherwise.
 */
struct event_trigger_data *find_named_trigger(const char *name)
{
	struct event_trigger_data *data;

	if (!name)
		return NULL;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (data->named_data)
			continue;
		if (strcmp(data->name, name) == 0)
			return data;
	}

	return NULL;
}

/**
 * is_named_trigger - determine if a given trigger is a named trigger
 * @test: The trigger data to test
 *
 * Return: true if 'test' is a named trigger, false otherwise.
 */
bool is_named_trigger(struct event_trigger_data *test)
{
	struct event_trigger_data *data;

	list_for_each_entry(data, &named_triggers, named_list) {
		if (test == data)
			return true;
	}

	return false;
}

/**
 * save_named_trigger - save the trigger in the named trigger list
 * @name: The name of the named trigger set
 * @data: The trigger data to save
 *
 * Return: 0 if successful, negative error otherwise.
 */
int save_named_trigger(const char *name, struct event_trigger_data *data)
{
	data->name = kstrdup(name, GFP_KERNEL);
	if (!data->name)
		return -ENOMEM;

	list_add(&data->named_list, &named_triggers);

	return 0;
}

/**
 * del_named_trigger - delete a trigger from the named trigger list
 * @data: The trigger data to delete
 */
void del_named_trigger(struct event_trigger_data *data)
{
	kfree(data->name);
	data->name = NULL;

	list_del(&data->named_list);
}

static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
{
	struct event_trigger_data *test;

	list_for_each_entry(test, &named_triggers, named_list) {
		if (strcmp(test->name, data->name) == 0) {
			if (pause) {
				test->paused_tmp = test->paused;
				test->paused = true;
			} else {
				test->paused = test->paused_tmp;
			}
		}
	}
}

/**
 * pause_named_trigger - Pause all named triggers with the same name
 * @data: The trigger data of a named trigger to pause
 *
 * Pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * pausing only one is meaningless, so pausing one named trigger needs
 * to pause all triggers with the same name.
 */
void pause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, true);
}

/**
 * unpause_named_trigger - Un-pause all named triggers with the same name
 * @data: The trigger data of a named trigger to unpause
 *
 * Un-pauses a named trigger along with all other triggers having the
 * same name.  Because named triggers share a common set of data,
 * unpausing only one is meaningless, so unpausing one named trigger
 * needs to unpause all triggers with the same name.
 */
void unpause_named_trigger(struct event_trigger_data *data)
{
	__pause_named_trigger(data, false);
}

/**
 * set_named_trigger_data - Associate common named trigger data
 * @data: The trigger data to associate
 * @named_data: The common named trigger data it should reference
 *
 * Named triggers are sets of triggers that share a common set of
 * trigger data.  The first named trigger registered with a given name
 * owns the common trigger data that the others subsequently
 * registered with the same name will reference.  This function
 * associates the common trigger data from the first trigger with the
 * given trigger.
 */
void set_named_trigger_data(struct event_trigger_data *data,
			    struct event_trigger_data *named_data)
{
	data->named_data = named_data;
}

struct event_trigger_data *
get_named_trigger_data(struct event_trigger_data *data)
{
	return data->named_data;
}
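
/*
 * The named trigger helpers above are used by trigger implementations
 * that support a ':name=' option (the hist triggers, implemented
 * elsewhere).  As a rough sketch of the intent, something like
 *
 *	# echo 'hist:keys=call_site:name=foo' > .../kmem/kmalloc/trigger
 *	# echo 'hist:keys=call_site:name=foo' > .../kmem/kfree/trigger
 *
 * makes both events feed the same shared trigger data, which is why
 * pausing or freeing one instance has to account for the others.
 */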

static void
traceon_trigger(struct event_trigger_data *data, void *rec,
		struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data, void *rec,
		      struct ring_buffer_event *event)
{
	if (tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_on();
}

static void
traceoff_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!tracing_is_on())
		return;

	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	tracing_off();
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
	.set_filter = set_trigger_filter,
};
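
/*
 * Note on EVENT_CMD_FL_POST_TRIGGER above: flagging traceoff as a post
 * trigger defers it to event_triggers_post_call(), i.e. until after the
 * triggering event has been written (see event_triggers_call() above).
 * Presumably this is so the event that switches tracing off still makes
 * it into the trace before tracing stops; traceon needs no such deferral.
 */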

#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data, void *rec,
		 struct ring_buffer_event *event)
{
	struct trace_event_file *file = data->private_data;

	if (file)
		tracing_snapshot_instance(file->tr);
	else
		tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data, void *rec,
		       struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data, rec, event);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct trace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */
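
/*
 * For reference, a snapshot trigger is typically used like:
 *
 *	# echo 'snapshot:1 if child_comm == "bash"' > \
 *		.../events/sched/sched_process_fork/trigger
 *
 * and the captured buffer is then read back via the 'snapshot' file
 * in tracefs.
 */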

#ifdef CONFIG_STACKTRACE
#ifdef CONFIG_UNWINDER_ORC
/* Skip 2:
 * event_triggers_post_call()
 * trace_event_raw_event_xxx()
 */
# define STACK_SKIP 2
#else
/*
 * Skip 4:
 * stacktrace_trigger()
 * event_triggers_post_call()
 * trace_event_buffer_commit()
 * trace_event_raw_event_xxx()
 */
#define STACK_SKIP 4
#endif

static void
stacktrace_trigger(struct event_trigger_data *data, void *rec,
		   struct ring_buffer_event *event)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
			 struct ring_buffer_event *event)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data, rec, event);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.flags = EVENT_CMD_FL_POST_TRIGGER,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static void
event_enable_trigger(struct event_trigger_data *data, void *rec,
		     struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (enable_data->enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
}

static void
event_enable_count_trigger(struct event_trigger_data *data, void *rec,
			   struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_trigger(data, rec, event);
}

int event_enable_trigger_print(struct seq_file *m,
			       struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	seq_printf(m, "%s:%s:%s",
		   enable_data->hist ?
		   (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
		   (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
		   enable_data->file->event_call->class->system,
		   trace_event_name(enable_data->file->event_call));

	if (data->count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", data->count);

	if (data->filter_str)
		seq_printf(m, " if %s\n", data->filter_str);
	else
		seq_putc(m, '\n');

	return 0;
}

void event_enable_trigger_free(struct event_trigger_ops *ops,
			       struct event_trigger_data *data)
{
	struct enable_trigger_data *enable_data = data->private_data;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		trace_event_enable_disable(enable_data->file, 0, 1);
		module_put(enable_data->file->event_call->mod);
		trigger_data_free(data);
		kfree(enable_data);
	}
}

static struct event_trigger_ops event_enable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_enable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_trigger_ops = {
	.func = event_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};

static struct event_trigger_ops event_disable_count_trigger_ops = {
	.func = event_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
};
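
/*
 * For illustration, the enable_event/disable_event commands handled
 * below let one event flip another event's soft-enable state, e.g.:
 *
 *	# echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *		.../events/block/block_unplug/trigger
 *
 * i.e. 'enable_event:<system>:<event>[:count] [if filter]' written to
 * the triggering event's 'trigger' file.
 */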

int event_enable_trigger_func(struct event_command *cmd_ops,
			      struct trace_event_file *file,
			      char *glob, char *cmd, char *param)
{
	struct trace_event_file *event_enable_file;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	struct trace_array *tr = file->tr;
	const char *system;
	const char *event;
	bool hist = false;
	char *trigger;
	char *number;
	bool enable;
	int ret;

	if (!param)
		return -EINVAL;

	/* separate the trigger from the filter (s:e:n [if filter]) */
	trigger = strsep(&param, " \t");
	if (!trigger)
		return -EINVAL;

	system = strsep(&trigger, ":");
	if (!trigger)
		return -EINVAL;

	event = strsep(&trigger, ":");

	ret = -EINVAL;
	event_enable_file = find_event_file(tr, system, event);
	if (!event_enable_file)
		goto out;

#ifdef CONFIG_HIST_TRIGGERS
	hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
		(strcmp(cmd, DISABLE_HIST_STR) == 0));

	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
	if (!enable_data) {
		kfree(trigger_data);
		goto out;
	}

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);
	RCU_INIT_POINTER(trigger_data->filter, NULL);

	enable_data->hist = hist;
	enable_data->enable = enable;
	enable_data->file = event_enable_file;
	trigger_data->private_data = enable_data;

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		kfree(enable_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(event_enable_file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	ret = trace_event_enable_disable(event_enable_file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * The above returns on success the # of triggers registered,
	 * but if it didn't register any it returns zero.  Consider no
	 * triggers registered a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of registered triggers */
	ret = 0;
out:
	return ret;

out_disable:
	trace_event_enable_disable(event_enable_file, 0, 1);
out_put:
	module_put(event_enable_file->event_call->mod);
out_free:
	if (cmd_ops->set_filter)
		cmd_ops->set_filter(NULL, trigger_data, NULL);
	kfree(trigger_data);
	kfree(enable_data);
	goto out;
}

int event_enable_register_trigger(char *glob,
				  struct event_trigger_ops *ops,
				  struct event_trigger_data *data,
				  struct trace_event_file *file)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct enable_trigger_data *test_enable_data;
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		test_enable_data = test->private_data;
		if (test_enable_data &&
		    (test->cmd_ops->trigger_type ==
		     data->cmd_ops->trigger_type) &&
		    (test_enable_data->file == enable_data->file)) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	update_cond_flag(file);
	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		update_cond_flag(file);
		ret--;
	}
out:
	return ret;
}

void event_enable_unregister_trigger(char *glob,
				     struct event_trigger_ops *ops,
				     struct event_trigger_data *test,
				     struct trace_event_file *file)
{
	struct enable_trigger_data *test_enable_data = test->private_data;
	struct enable_trigger_data *enable_data;
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		enable_data = data->private_data;
		if (enable_data &&
		    (data->cmd_ops->trigger_type ==
		     test->cmd_ops->trigger_type) &&
		    (enable_data->file == test_enable_data->file)) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			update_cond_flag(file);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

static struct event_trigger_ops *
event_enable_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;
	bool enable;

#ifdef CONFIG_HIST_TRIGGERS
	enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
		  (strcmp(cmd, ENABLE_HIST_STR) == 0));
#else
	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
#endif
	if (enable)
		ops = param ? &event_enable_count_trigger_ops :
			&event_enable_trigger_ops;
	else
		ops = param ? &event_disable_count_trigger_ops :
			&event_disable_trigger_ops;

	return ops;
}

static struct event_command trigger_enable_cmd = {
	.name = ENABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static struct event_command trigger_disable_cmd = {
	.name = DISABLE_EVENT_STR,
	.trigger_type = ETT_EVENT_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.get_trigger_ops = event_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
};

static __init void unregister_trigger_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_enable_cmd);
	unregister_event_command(&trigger_disable_cmd);
}

static __init int register_trigger_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_enable_disable_cmds();

	return ret;
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();
	register_trigger_enable_disable_cmds();
	register_trigger_hist_enable_disable_cmds();
	register_trigger_hist_cmd();

	return 0;
}